Hello community,
here is the log from the commit of package kernel-source
checked in at Sun Jul 22 12:00:39 CEST 2007.
--------
--- kernel-source/kernel-bigsmp.changes 2007-07-21 18:09:13.000000000 +0200
+++ /mounts/work_src_done/STABLE/kernel-source/kernel-bigsmp.changes 2007-07-22 11:56:54.000000000 +0200
@@ -1,0 +2,34 @@
+Sun Jul 22 03:02:03 CEST 2007 - sdietrich@suse.de
+
+- Update RT config files. (add CONFIG_PREEMPT_HOOKS=y)
+
+-------------------------------------------------------------------
+Sun Jul 22 01:46:44 CEST 2007 - sdietrich@suse.de
+
+- Update config files. (add CONFIG_UIO=m)
+
+-------------------------------------------------------------------
+Sun Jul 22 01:38:54 CEST 2007 - sdietrich@suse.de
+
+- patches.rt/preempt_sched_hooks.patch:
+  SCHED: Generic hooks for trapping task preemption in KVM
+ (this is needed to run KVM under RT)
+ (<11841693332609-git-send-email-avi@qumranet.com>).
+- series.conf (rt config only):
+ - add preempt_sched_hooks.patch
+ - suppress patches.suse/sysctl-add-affinity_load_balancing
+- patches.rt/patch-2.6.22.1-rt4.openSUSE: reconstitute
+
+-------------------------------------------------------------------
+Sat Jul 21 18:34:36 CEST 2007 - jeffm@suse.de
+
+- Update config files for -rt.
+
+-------------------------------------------------------------------
+Sat Jul 21 05:24:00 CEST 2007 - gregkh@suse.de
+
+- Update config files.
+- patches.drivers/uio-documentation.patch: UIO: Documentation.
+- patches.drivers/uio.patch: UIO: Add the User IO core code.
+
+-------------------------------------------------------------------
kernel-debug.changes: same change
kernel-default.changes: same change
kernel-dummy.changes: same change
kernel-kdump.changes: same change
kernel-ppc64.changes: same change
kernel-rt.changes: same change
kernel-rt_debug.changes: same change
kernel-s390.changes: same change
kernel-source.changes: same change
kernel-syms.changes: same change
kernel-vanilla.changes: same change
kernel-xen.changes: same change
kernel-xenpae.changes: same change
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ kernel-bigsmp.spec ++++++
++++ 18002 lines (skipped)
++++ between kernel-source/kernel-bigsmp.spec
++++ and /mounts/work_src_done/STABLE/kernel-source/kernel-bigsmp.spec
kernel-debug.spec: same change
kernel-default.spec: same change
kernel-dummy.spec: same change
kernel-kdump.spec: same change
kernel-ppc64.spec: same change
kernel-rt_debug.spec: same change
kernel-rt.spec: same change
kernel-s390.spec: same change
kernel-source.spec: same change
kernel-syms.spec: same change
kernel-vanilla.spec: same change
kernel-xenpae.spec: same change
kernel-xen.spec: same change
++++++ build-source-timestamp ++++++
--- kernel-source/build-source-timestamp 2007-07-21 18:09:13.000000000 +0200
+++ /mounts/work_src_done/STABLE/kernel-source/build-source-timestamp 2007-07-22 11:56:49.000000000 +0200
@@ -1 +1 @@
-2007/07/18 10:28:45 UTC
+2007/07/22 01:02:40 UTC
++++++ config.tar.bz2 ++++++
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/alpha/default new/config/alpha/default
--- old/config/alpha/default 2007-07-15 12:35:56.000000000 +0200
+++ new/config/alpha/default 2007-07-21 18:49:01.000000000 +0200
@@ -953,7 +953,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_IDE_MAX_HWIFS=4
CONFIG_BLK_DEV_IDE=y
@@ -3108,6 +3107,11 @@
# CONFIG_KS0108 is not set
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/alpha/vanilla new/config/alpha/vanilla
--- old/config/alpha/vanilla 2007-07-15 12:35:56.000000000 +0200
+++ new/config/alpha/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -952,7 +952,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_IDE_MAX_HWIFS=4
CONFIG_BLK_DEV_IDE=y
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/bigsmp new/config/i386/bigsmp
--- old/config/i386/bigsmp 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/bigsmp 2007-07-21 18:49:01.000000000 +0200
@@ -1183,7 +1183,6 @@
# CONFIG_MSI_LAPTOP is not set
# CONFIG_SONY_LAPTOP is not set
# CONFIG_THINKPAD_ACPI is not set
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3503,6 +3502,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/debug new/config/i386/debug
--- old/config/i386/debug 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/debug 2007-07-21 18:49:01.000000000 +0200
@@ -1189,7 +1189,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3494,6 +3493,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/default new/config/i386/default
--- old/config/i386/default 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/default 2007-07-21 18:49:01.000000000 +0200
@@ -1186,7 +1186,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3492,6 +3491,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/rt new/config/i386/rt
--- old/config/i386/rt 2007-07-18 10:52:20.000000000 +0200
+++ new/config/i386/rt 2007-07-22 11:56:21.000000000 +0200
@@ -190,6 +190,7 @@
CONFIG_PREEMPT=y
CONFIG_PREEMPT_SOFTIRQS=y
CONFIG_PREEMPT_HARDIRQS=y
+CONFIG_PREEMPT_HOOKS=y
CONFIG_PREEMPT_BKL=y
# CONFIG_CLASSIC_RCU is not set
CONFIG_PREEMPT_RCU=y
@@ -3350,3 +3351,4 @@
CONFIG_X86_BIOS_REBOOT=y
CONFIG_X86_TRAMPOLINE=y
CONFIG_KTIME_SCALAR=y
+CONFIG_UIO=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/rt_debug new/config/i386/rt_debug
--- old/config/i386/rt_debug 2007-07-18 10:52:20.000000000 +0200
+++ new/config/i386/rt_debug 2007-07-22 11:56:21.000000000 +0200
@@ -191,6 +191,7 @@
CONFIG_PREEMPT=y
CONFIG_PREEMPT_SOFTIRQS=y
CONFIG_PREEMPT_HARDIRQS=y
+CONFIG_PREEMPT_HOOKS=y
CONFIG_PREEMPT_BKL=y
# CONFIG_CLASSIC_RCU is not set
CONFIG_PREEMPT_RCU=y
@@ -3073,6 +3074,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/vanilla new/config/i386/vanilla
--- old/config/i386/vanilla 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -1180,7 +1180,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/xen new/config/i386/xen
--- old/config/i386/xen 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/xen 2007-07-21 18:49:01.000000000 +0200
@@ -1084,7 +1084,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3205,6 +3204,11 @@
#
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/i386/xenpae new/config/i386/xenpae
--- old/config/i386/xenpae 2007-07-15 12:35:56.000000000 +0200
+++ new/config/i386/xenpae 2007-07-21 18:49:01.000000000 +0200
@@ -1085,7 +1085,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3219,6 +3218,11 @@
#
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/ia64/debug new/config/ia64/debug
--- old/config/ia64/debug 2007-07-15 12:35:56.000000000 +0200
+++ new/config/ia64/debug 2007-07-21 18:49:01.000000000 +0200
@@ -972,7 +972,6 @@
CONFIG_SGI_IOC4=m
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_IDE_MAX_HWIFS=10
CONFIG_BLK_DEV_IDE=m
@@ -2908,6 +2907,11 @@
# Auxiliary Display support
#
# CONFIG_KS0108 is not set
+
+#
+# Userspace I/O
+#
+CONFIG_UIO=m
CONFIG_MSPEC=m
#
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/ia64/default new/config/ia64/default
--- old/config/ia64/default 2007-07-15 12:35:56.000000000 +0200
+++ new/config/ia64/default 2007-07-21 18:49:01.000000000 +0200
@@ -970,7 +970,6 @@
CONFIG_SGI_IOC4=m
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_IDE_MAX_HWIFS=10
CONFIG_BLK_DEV_IDE=m
@@ -2905,6 +2904,11 @@
# Auxiliary Display support
#
# CONFIG_KS0108 is not set
+
+#
+# Userspace I/O
+#
+CONFIG_UIO=m
CONFIG_MSPEC=m
#
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/ia64/vanilla new/config/ia64/vanilla
--- old/config/ia64/vanilla 2007-07-15 12:35:56.000000000 +0200
+++ new/config/ia64/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -953,7 +953,6 @@
CONFIG_SGI_IOC4=m
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_IDE_MAX_HWIFS=10
CONFIG_BLK_DEV_IDE=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/powerpc/default new/config/powerpc/default
--- old/config/powerpc/default 2007-07-15 12:35:56.000000000 +0200
+++ new/config/powerpc/default 2007-07-21 18:49:01.000000000 +0200
@@ -827,7 +827,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
@@ -2493,6 +2492,11 @@
# CONFIG_KS0108 is not set
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=y
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/powerpc/kdump new/config/powerpc/kdump
--- old/config/powerpc/kdump 2007-07-15 12:35:57.000000000 +0200
+++ new/config/powerpc/kdump 2007-07-21 18:49:01.000000000 +0200
@@ -693,7 +693,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
@@ -1793,6 +1792,11 @@
# CONFIG_KS0108 is not set
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=y
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/powerpc/ppc64 new/config/powerpc/ppc64
--- old/config/powerpc/ppc64 2007-07-15 12:35:57.000000000 +0200
+++ new/config/powerpc/ppc64 2007-07-21 18:49:01.000000000 +0200
@@ -775,7 +775,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
@@ -2274,6 +2273,11 @@
# CONFIG_KS0108 is not set
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# File systems
#
CONFIG_EXT2_FS=y
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/powerpc/vanilla new/config/powerpc/vanilla
--- old/config/powerpc/vanilla 2007-07-15 12:35:57.000000000 +0200
+++ new/config/powerpc/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -825,7 +825,6 @@
# CONFIG_SGI_IOC4 is not set
CONFIG_TIFM_CORE=m
CONFIG_TIFM_7XX1=m
-CONFIG_BLINK=m
CONFIG_IDE=y
CONFIG_BLK_DEV_IDE=y
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/s390/default new/config/s390/default
--- old/config/s390/default 2007-07-15 12:35:57.000000000 +0200
+++ new/config/s390/default 2007-07-21 18:49:01.000000000 +0200
@@ -605,7 +605,6 @@
#
# Misc devices
#
-CONFIG_BLINK=m
#
# SCSI device support
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/s390/s390 new/config/s390/s390
--- old/config/s390/s390 2007-07-15 12:35:57.000000000 +0200
+++ new/config/s390/s390 2007-07-21 18:49:01.000000000 +0200
@@ -601,7 +601,6 @@
#
# Misc devices
#
-CONFIG_BLINK=m
#
# SCSI device support
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/s390/vanilla new/config/s390/vanilla
--- old/config/s390/vanilla 2007-07-15 12:35:57.000000000 +0200
+++ new/config/s390/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -604,7 +604,6 @@
#
# Misc devices
#
-CONFIG_BLINK=m
#
# SCSI device support
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/debug new/config/x86_64/debug
--- old/config/x86_64/debug 2007-07-15 12:35:57.000000000 +0200
+++ new/config/x86_64/debug 2007-07-21 18:49:01.000000000 +0200
@@ -1101,7 +1101,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3148,6 +3147,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# Firmware Drivers
#
CONFIG_EDD=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/default new/config/x86_64/default
--- old/config/x86_64/default 2007-07-15 12:35:57.000000000 +0200
+++ new/config/x86_64/default 2007-07-21 18:49:01.000000000 +0200
@@ -1101,7 +1101,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3156,6 +3155,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# Firmware Drivers
#
CONFIG_EDD=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/rt new/config/x86_64/rt
--- old/config/x86_64/rt 2007-07-18 10:52:20.000000000 +0200
+++ new/config/x86_64/rt 2007-07-22 11:56:21.000000000 +0200
@@ -158,6 +158,7 @@
CONFIG_PREEMPT=y
CONFIG_PREEMPT_SOFTIRQS=y
CONFIG_PREEMPT_HARDIRQS=y
+CONFIG_PREEMPT_HOOKS=y
CONFIG_PREEMPT_BKL=y
# CONFIG_CLASSIC_RCU is not set
CONFIG_PREEMPT_RCU=y
@@ -3237,3 +3238,4 @@
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HAS_DMA=y
+CONFIG_UIO=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/rt_debug new/config/x86_64/rt_debug
--- old/config/x86_64/rt_debug 2007-07-18 10:52:20.000000000 +0200
+++ new/config/x86_64/rt_debug 2007-07-22 11:56:21.000000000 +0200
@@ -159,6 +159,7 @@
CONFIG_PREEMPT=y
CONFIG_PREEMPT_SOFTIRQS=y
CONFIG_PREEMPT_HARDIRQS=y
+CONFIG_PREEMPT_HOOKS=y
CONFIG_PREEMPT_BKL=y
# CONFIG_CLASSIC_RCU is not set
CONFIG_PREEMPT_RCU=y
@@ -2960,6 +2961,11 @@
CONFIG_KVM_AMD=m
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# Firmware Drivers
#
CONFIG_EDD=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/vanilla new/config/x86_64/vanilla
--- old/config/x86_64/vanilla 2007-07-15 12:35:57.000000000 +0200
+++ new/config/x86_64/vanilla 2007-07-21 18:49:01.000000000 +0200
@@ -1097,7 +1097,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/config/x86_64/xen new/config/x86_64/xen
--- old/config/x86_64/xen 2007-07-15 12:35:57.000000000 +0200
+++ new/config/x86_64/xen 2007-07-21 18:49:01.000000000 +0200
@@ -1038,7 +1038,6 @@
CONFIG_THINKPAD_ACPI=m
# CONFIG_THINKPAD_ACPI_DEBUG is not set
CONFIG_THINKPAD_ACPI_BAY=y
-CONFIG_BLINK=m
CONFIG_IDE=m
CONFIG_BLK_DEV_IDE=m
@@ -3080,6 +3079,11 @@
#
#
+# Userspace I/O
+#
+CONFIG_UIO=m
+
+#
# Firmware Drivers
#
CONFIG_EDD=m
++++++ needed_space_in_mb ++++++
--- kernel-source/needed_space_in_mb 2007-07-21 18:24:36.000000000 +0200
+++ /mounts/work_src_done/STABLE/kernel-source/needed_space_in_mb 2007-07-22 11:57:03.000000000 +0200
@@ -1 +1 @@
-6000
+3072
++++++ patches.drivers.tar.bz2 ++++++
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/patches.drivers/uio-documentation.patch new/patches.drivers/uio-documentation.patch
--- old/patches.drivers/uio-documentation.patch 1970-01-01 01:00:00.000000000 +0100
+++ new/patches.drivers/uio-documentation.patch 2007-07-21 05:20:05.000000000 +0200
@@ -0,0 +1,647 @@
+From hjk@linutronix.de Mon Dec 11 08:00:14 2006
+From: Hans J. Koch
+To: Greg KH
+Subject: UIO: Documentation
+Date: Mon, 11 Dec 2006 16:59:59 +0100
+Cc: tglx@linutronix.de, Benedikt Spranger
+Message-Id: <200612111659.59948.hjk@linutronix.de>
+Patch-mainline: 2.6.23
+
+From: Hans J. Koch
+
+Documentation for the UIO interface
+
+From: Hans J. Koch
+Signed-off-by: Greg Kroah-Hartman
+---
+ Documentation/DocBook/kernel-api.tmpl | 4
+ Documentation/DocBook/uio-howto.tmpl | 611 ++++++++++++++++++++++++++++++++++
+ 2 files changed, 615 insertions(+)
+
+--- linux-2.6.22.orig/Documentation/DocBook/kernel-api.tmpl
++++ linux-2.6.22/Documentation/DocBook/kernel-api.tmpl
+@@ -406,6 +406,10 @@ X!Edrivers/pnp/system.c
+ !Edrivers/pnp/manager.c
+ !Edrivers/pnp/support.c
+ </sect1>
++ <sect1><title>Userspace IO devices</title>
++!Edrivers/uio/uio.c
++!Iinclude/linux/uio_driver.h
++ </sect1>
+ </chapter>
+
+ <chapter id="blkdev">
+--- /dev/null
++++ linux-2.6.22/Documentation/DocBook/uio-howto.tmpl
+@@ -0,0 +1,611 @@
++<?xml version="1.0" encoding="UTF-8"?>
++<!DOCTYPE book PUBLIC "-//OASIS//DTD DocBook XML V4.2//EN"
++"http://www.oasis-open.org/docbook/xml/4.2/docbookx.dtd" []>
++
++<book id="index">
++<bookinfo>
++<title>The Userspace I/O HOWTO</title>
++
++<author>
++ <firstname>Hans-Jürgen</firstname>
++ <surname>Koch</surname>
++ <authorblurb><para>Linux developer, Linutronix</para></authorblurb>
++ <affiliation>
++ <orgname>
++ <ulink url="http://www.linutronix.de">Linutronix</ulink>
++ </orgname>
++
++ <address>
++ <email>hjk@linutronix.de</email>
++ </address>
++ </affiliation>
++</author>
++
++<pubdate>2006-12-11</pubdate>
++
++<abstract>
++ <para>This HOWTO describes concept and usage of Linux kernel's
++ Userspace I/O system.</para>
++</abstract>
++
++<revhistory>
++ <revision>
++ <revnumber>0.3</revnumber>
++ <date>2007-04-29</date>
++ <authorinitials>hjk</authorinitials>
++ <revremark>Added section about userspace drivers.</revremark>
++ </revision>
++ <revision>
++ <revnumber>0.2</revnumber>
++ <date>2007-02-13</date>
++ <authorinitials>hjk</authorinitials>
++ <revremark>Update after multiple mappings were added.</revremark>
++ </revision>
++ <revision>
++ <revnumber>0.1</revnumber>
++ <date>2006-12-11</date>
++ <authorinitials>hjk</authorinitials>
++ <revremark>First draft.</revremark>
++ </revision>
++</revhistory>
++</bookinfo>
++
++<chapter id="aboutthisdoc">
++<?dbhtml filename="about.html"?>
++<title>About this document</title>
++
++<sect1 id="copyright">
++<?dbhtml filename="copyright.html"?>
++<title>Copyright and License</title>
++<para>
++ Copyright (c) 2006 by Hans-Jürgen Koch.</para>
++<para>
++This documentation is Free Software licensed under the terms of the
++GPL version 2.
++</para>
++</sect1>
++
++<sect1 id="translations">
++<?dbhtml filename="translations.html"?>
++<title>Translations</title>
++
++<para>If you know of any translations for this document, or you are
++interested in translating it, please email me
++<email>hjk@linutronix.de</email>.
++</para>
++</sect1>
++
++<sect1 id="preface">
++<title>Preface</title>
++ <para>
++ For many types of devices, creating a Linux kernel driver is
++ overkill. All that is really needed is some way to handle an
++ interrupt and provide access to the memory space of the
++ device. The logic of controlling the device does not
++ necessarily have to be within the kernel, as the device does
++ not need to take advantage of any of other resources that the
++ kernel provides. One such common class of devices that are
++ like this are for industrial I/O cards.
++ </para>
++ <para>
++ To address this situation, the userspace I/O system (UIO) was
++ designed. For typical industrial I/O cards, only a very small
++ kernel module is needed. The main part of the driver will run in
++ user space. This simplifies development and reduces the risk of
++ serious bugs within a kernel module.
++ </para>
++</sect1>
++
++<sect1 id="thanks">
++<title>Acknowledgments</title>
++ <para>I'd like to thank Thomas Gleixner and Benedikt Spranger of
++ Linutronix, who have not only written most of the UIO code, but also
++ helped greatly writing this HOWTO by giving me all kinds of background
++ information.</para>
++</sect1>
++
++<sect1 id="feedback">
++<title>Feedback</title>
++ <para>Find something wrong with this document? (Or perhaps something
++ right?) I would love to hear from you. Please email me at
++ <email>hjk@linutronix.de</email>.</para>
++</sect1>
++</chapter>
++
++<chapter id="about">
++<?dbhtml filename="about.html"?>
++<title>About UIO</title>
++
++<para>If you use UIO for your card's driver, here's what you get:</para>
++
++<itemizedlist>
++<listitem>
++ <para>only one small kernel module to write and maintain.</para>
++</listitem>
++<listitem>
++ <para>develop the main part of your driver in user space,
++ with all the tools and libraries you're used to.</para>
++</listitem>
++<listitem>
++ <para>bugs in your driver won't crash the kernel.</para>
++</listitem>
++<listitem>
++ <para>updates of your driver can take place without recompiling
++ the kernel.</para>
++</listitem>
++<listitem>
++ <para>if you need to keep some parts of your driver closed source,
++ you can do so without violating the GPL license on the kernel.</para>
++</listitem>
++</itemizedlist>
++
++<sect1 id="how_uio_works">
++<title>How UIO works</title>
++ <para>
++ Each UIO device is accessed through a device file and several
++ sysfs attribute files. The device file will be called
++ <filename>/dev/uio0</filename> for the first device, and
++ <filename>/dev/uio1</filename>, <filename>/dev/uio2</filename>
++ and so on for subsequent devices.
++ </para>
++
++ <para><filename>/dev/uioX</filename> is used to access the
++ address space of the card. Just use
++ <function>mmap()</function> to access registers or RAM
++ locations of your card.
++ </para>
++
++ <para>
++ Interrupts are handled by reading from
++ <filename>/dev/uioX</filename>. A blocking
++ <function>read()</function> from
++ <filename>/dev/uioX</filename> will return as soon as an
++ interrupt occurs. You can also use
++ <function>select()</function> on
++ <filename>/dev/uioX</filename> to wait for an interrupt. The
++ integer value read from <filename>/dev/uioX</filename>
++ represents the total interrupt count. You can use this number
++ to figure out if you missed some interrupts.
++ </para>
++
++ <para>
++ To handle interrupts properly, your custom kernel module can
++ provide its own interrupt handler. It will automatically be
++ called by the built-in handler.
++ </para>
++
++ <para>
++ For cards that don't generate interrupts but need to be
++ polled, there is the possibility to set up a timer that
++ triggers the interrupt handler at configurable time intervals.
++ See <filename>drivers/uio/uio_dummy.c</filename> for an
++ example of this technique.
++ </para>
++
++ <para>
++ Each driver provides attributes that are used to read or write
++ variables. These attributes are accessible through sysfs
++ files. A custom kernel driver module can add its own
++ attributes to the device owned by the uio driver, but not added
++ to the UIO device itself at this time. This might change in the
++ future if it would be found to be useful.
++ </para>
++
++ <para>
++ The following standard attributes are provided by the UIO
++ framework:
++ </para>
++<itemizedlist>
++<listitem>
++ <para>
++ <filename>name</filename>: The name of your device. It is
++ recommended to use the name of your kernel module for this.
++ </para>
++</listitem>
++<listitem>
++ <para>
++ <filename>version</filename>: A version string defined by your
++ driver. This allows the user space part of your driver to deal
++ with different versions of the kernel module.
++ </para>
++</listitem>
++<listitem>
++ <para>
++ <filename>event</filename>: The total number of interrupts
++ handled by the driver since the last time the device node was
++ read.
++ </para>
++</listitem>
++</itemizedlist>
++<para>
++ These attributes appear under the
++ <filename>/sys/class/uio/uioX</filename> directory. Please
++ note that this directory might be a symlink, and not a real
++ directory. Any userspace code that accesses it must be able
++ to handle this.
++</para>
++<para>
++ Each UIO device can make one or more memory regions available for
++ memory mapping. This is necessary because some industrial I/O cards
++ require access to more than one PCI memory region in a driver.
++</para>
++<para>
++ Each mapping has its own directory in sysfs, the first mapping
++ appears as <filename>/sys/class/uio/uioX/maps/map0/</filename>.
++ Subsequent mappings create directories <filename>map1/</filename>,
++ <filename>map2/</filename>, and so on. These directories will only
++ appear if the size of the mapping is not 0.
++</para>
++<para>
++ Each <filename>mapX/</filename> directory contains two read-only files
++ that show start address and size of the memory:
++</para>
++<itemizedlist>
++<listitem>
++ <para>
++ <filename>addr</filename>: The address of memory that can be mapped.
++ </para>
++</listitem>
++<listitem>
++ <para>
++ <filename>size</filename>: The size, in bytes, of the memory
++ pointed to by addr.
++ </para>
++</listitem>
++</itemizedlist>
++
++<para>
++ From userspace, the different mappings are distinguished by adjusting
++ the <varname>offset</varname> parameter of the
++ <function>mmap()</function> call. To map the memory of mapping N, you
++ have to use N times the page size as your offset:
++</para>
++<programlisting format="linespecific">
++offset = N * getpagesize();
++</programlisting>
++
++</sect1>
++</chapter>
++
++<chapter id="using-uio_dummy" xreflabel="Using uio_dummy">
++<?dbhtml filename="using-uio_dummy.html"?>
++<title>Using uio_dummy</title>
++ <para>
++ Well, there is no real use for uio_dummy. Its only purpose is
++ to test most parts of the UIO system (everything except
++ hardware interrupts), and to serve as an example for the
++ kernel module that you will have to write yourself.
++ </para>
++
++<sect1 id="what_uio_dummy_does">
++<title>What uio_dummy does</title>
++ <para>
++ The kernel module <filename>uio_dummy.ko</filename> creates a
++ device that uses a timer to generate periodic interrupts. The
++ interrupt handler does nothing but increment a counter. The
++ driver adds two custom attributes, <varname>count</varname>
++ and <varname>freq</varname>, that appear under
++ <filename>/sys/devices/platform/uio_dummy/</filename>.
++ </para>
++
++ <para>
++ The attribute <varname>count</varname> can be read and
++ written. The associated file
++ <filename>/sys/devices/platform/uio_dummy/count</filename>
++ appears as a normal text file and contains the total number of
++ timer interrupts. If you look at it (e.g. using
++ <function>cat</function>), you'll notice it is slowly counting
++ up.
++ </para>
++
++ <para>
++ The attribute <varname>freq</varname> can be read and written.
++ The content of
++ <filename>/sys/devices/platform/uio_dummy/freq</filename>
++ represents the number of system timer ticks between two timer
++ interrupts. The default value of <varname>freq</varname> is
++ the value of the kernel variable <varname>HZ</varname>, which
++ gives you an interval of one second. Lower values will
++ increase the frequency. Try the following:
++ </para>
++<programlisting format="linespecific">
++cd /sys/devices/platform/uio_dummy/
++echo 100 > freq
++</programlisting>
++ <para>
++ Use <function>cat count</function> to see how the interrupt
++ frequency changes.
++ </para>
++</sect1>
++</chapter>
++
++<chapter id="custom_kernel_module" xreflabel="Writing your own kernel module">
++<?dbhtml filename="custom_kernel_module.html"?>
++<title>Writing your own kernel module</title>
++ <para>
++ Please have a look at <filename>uio_dummy.c</filename> as an
++ example. The following paragraphs explain the different
++ sections of this file.
++ </para>
++
++<sect1 id="uio_info">
++<title>struct uio_info</title>
++ <para>
++ This structure tells the framework the details of your driver,
++ Some of the members are required, others are optional.
++ </para>
++
++<itemizedlist>
++<listitem><para>
++<varname>char *name</varname>: Required. The name of your driver as
++it will appear in sysfs. I recommend using the name of your module for this.
++</para></listitem>
++
++<listitem><para>
++<varname>char *version</varname>: Required. This string appears in
++<filename>/sys/class/uio/uioX/version</filename>.
++</para></listitem>
++
++<listitem><para>
++<varname>struct uio_mem mem[ MAX_UIO_MAPS ]</varname>: Required if you
++have memory that can be mapped with <function>mmap()</function>. For each
++mapping you need to fill one of the <varname>uio_mem</varname> structures.
++See the description below for details.
++</para></listitem>
++
++<listitem><para>
++<varname>long irq</varname>: Required. If your hardware generates an
++interrupt, it's your modules task to determine the irq number during
++initialization. If you don't have a hardware generated interrupt but
++want to trigger the interrupt handler in some other way, set
++<varname>irq</varname> to <varname>UIO_IRQ_CUSTOM</varname>. The
++uio_dummy module does this as it triggers the event mechanism in a timer
++routine. If you had no interrupt at all, you could set
++<varname>irq</varname> to <varname>UIO_IRQ_NONE</varname>, though this
++rarely makes sense.
++</para></listitem>
++
++<listitem><para>
++<varname>unsigned long irq_flags</varname>: Required if you've set
++<varname>irq</varname> to a hardware interrupt number. The flags given
++here will be used in the call to <function>request_irq()</function>.
++</para></listitem>
++
++<listitem><para>
++<varname>int (*mmap)(struct uio_info *info, struct vm_area_struct
++*vma)</varname>: Optional. If you need a special
++<function>mmap()</function> function, you can set it here. If this
++pointer is not NULL, your <function>mmap()</function> will be called
++instead of the built-in one.
++</para></listitem>
++
++<listitem><para>
++<varname>int (*open)(struct uio_info *info, struct inode *inode)
++</varname>: Optional. You might want to have your own
++<function>open()</function>, e.g. to enable interrupts only when your
++device is actually used.
++</para></listitem>
++
++<listitem><para>
++<varname>int (*release)(struct uio_info *info, struct inode *inode)
++</varname>: Optional. If you define your own
++<function>open()</function>, you will probably also want a custom
++<function>release()</function> function.
++</para></listitem>
++</itemizedlist>
++
++<para>
++Usually, your device will have one or more memory regions that can be mapped
++to user space. For each region, you have to set up a
++<varname>struct uio_mem</varname> in the <varname>mem[]</varname> array.
++Here's a description of the fields of <varname>struct uio_mem</varname>:
++</para>
++
++<itemizedlist>
++<listitem><para>
++<varname>int memtype</varname>: Required if the mapping is used. Set this to
++<varname>UIO_MEM_PHYS</varname> if you you have physical memory on your
++card to be mapped. Use <varname>UIO_MEM_LOGICAL</varname> for logical
++memory (e.g. allocated with <function>kmalloc()</function>). There's also
++<varname>UIO_MEM_VIRTUAL</varname> for virtual memory.
++</para></listitem>
++
++<listitem><para>
++<varname>unsigned long addr</varname>: Required if the mapping is used.
++Fill in the address of your memory block. This address is the one that
++appears in sysfs.
++</para></listitem>
++
++<listitem><para>
++<varname>unsigned long size</varname>: Fill in the size of the
++memory block that <varname>addr</varname> points to. If <varname>size</varname>
++is zero, the mapping is considered unused. Note that you
++<emphasis>must</emphasis> initialize <varname>size</varname> with zero for
++all unused mappings.
++</para></listitem>
++
++<listitem><para>
++<varname>void *internal_addr</varname>: If you have to access this memory
++region from within your kernel module, you will want to map it internally by
++using something like <function>ioremap()</function>. Addresses
++returned by this function cannot be mapped to user space, so you must not
++store it in <varname>addr</varname>. Use <varname>internal_addr</varname>
++instead to remember such an address.
++</para></listitem>
++</itemizedlist>
++
++<para>
++Please do not touch the <varname>kobj</varname> element of
++<varname>struct uio_mem</varname>! It is used by the UIO framework
++to set up sysfs files for this mapping. Simply leave it alone.
++</para>
++</sect1>
++
++<sect1 id="adding_irq_handler">
++<title>Adding an interrupt handler</title>
++ <para>
++ What you need to do in your interrupt handler depends on your
++ hardware and on how you want to handle it. You should try to
++ keep the amount of code in your kernel interrupt handler low.
++ If your hardware requires no action that you
++ <emphasis>have</emphasis> to perform after each interrupt,
++ then your handler can be empty.</para> <para>If, on the other
++ hand, your hardware <emphasis>needs</emphasis> some action to
++ be performed after each interrupt, then you
++ <emphasis>must</emphasis> do it in your kernel module. Note
++ that you cannot rely on the userspace part of your driver. Your
++ userspace program can terminate at any time, possibly leaving
++ your hardware in a state where proper interrupt handling is
++ still required.
++ </para>
++
++ <para>
++ There might also be applications where you want to read data
++ from your hardware at each interrupt and buffer it in a piece
++ of kernel memory you've allocated for that purpose. With this
++ technique you could avoid loss of data if your userspace
++ program misses an interrupt.
++ </para>
++
++ <para>
++ A note on shared interrupts: Your driver should support
++ interrupt sharing whenever this is possible. It is possible if
++ and only if your driver can detect whether your hardware has
++ triggered the interrupt or not. This is usually done by looking
++ at an interrupt status register. If your driver sees that the
++ IRQ bit is actually set, it will perform its actions, and the
++ handler returns IRQ_HANDLED. If the driver detects that it was
++ not your hardware that caused the interrupt, it will do nothing
++ and return IRQ_NONE, allowing the kernel to call the next
++ possible interrupt handler.
++ </para>
++
++ <para>
++ If you decide not to support shared interrupts, your card
++ won't work in computers with no free interrupts. As this
++ frequently happens on the PC platform, you can save yourself a
++ lot of trouble by supporting interrupt sharing.
++ </para>
++</sect1>
++
++</chapter>
++
++<chapter id="userspace_driver" xreflabel="Writing a driver in user space">
++<?dbhtml filename="userspace_driver.html"?>
++<title>Writing a driver in userspace</title>
++ <para>
++ Once you have a working kernel module for your hardware, you can
++ write the userspace part of your driver. You don't need any special
++ libraries, your driver can be written in any reasonable language,
++ you can use floating point numbers and so on. In short, you can
++ use all the tools and libraries you'd normally use for writing a
++ userspace application.
++ </para>
++
++<sect1 id="getting_uio_information">
++<title>Getting information about your UIO device</title>
++ <para>
++ Information about all UIO devices is available in sysfs. The
++ first thing you should do in your driver is check
++ <varname>name</varname> and <varname>version</varname> to
++	make sure you're talking to the right device and that its kernel
++ driver has the version you expect.
++ </para>
++ <para>
++ You should also make sure that the memory mapping you need
++ exists and has the size you expect.
++ </para>
++ <para>
++ There is a tool called <varname>lsuio</varname> that lists
++ UIO devices and their attributes. It is available here:
++ </para>
++ <para>
++ <ulink url="http://www.osadl.org/projects/downloads/UIO/user/">
++ http://www.osadl.org/projects/downloads/UIO/user/</ulink>
++ </para>
++ <para>
++ With <varname>lsuio</varname> you can quickly check if your
++ kernel module is loaded and which attributes it exports.
++ Have a look at the manpage for details.
++ </para>
++ <para>
++ The source code of <varname>lsuio</varname> can serve as an
++	example for getting information about a UIO device.
++ The file <filename>uio_helper.c</filename> contains a lot of
++ functions you could use in your userspace driver code.
++ </para>
++</sect1>
++
++<sect1 id="mmap_device_memory">
++<title>mmap() device memory</title>
++ <para>
++ After you made sure you've got the right device with the
++ memory mappings you need, all you have to do is to call
++ <function>mmap()</function> to map the device's memory
++ to userspace.
++ </para>
++ <para>
++ The parameter <varname>offset</varname> of the
++ <function>mmap()</function> call has a special meaning
++ for UIO devices: It is used to select which mapping of
++ your device you want to map. To map the memory of
++ mapping N, you have to use N times the page size as
++ your offset:
++ </para>
++<programlisting format="linespecific">
++ offset = N * getpagesize();
++</programlisting>
++ <para>
++ N starts from zero, so if you've got only one memory
++ range to map, set <varname>offset = 0</varname>.
++ A drawback of this technique is that memory is always
++ mapped beginning with its start address.
++ </para>
++</sect1>
++
++<sect1 id="wait_for_interrupts">
++<title>Waiting for interrupts</title>
++ <para>
++	After you successfully mapped your device's memory, you
++ can access it like an ordinary array. Usually, you will
++ perform some initialization. After that, your hardware
++ starts working and will generate an interrupt as soon
++ as it's finished, has some data available, or needs your
++	attention because an error occurred.
++ </para>
++ <para>
++ <filename>/dev/uioX</filename> is a read-only file. A
++ <function>read()</function> will always block until an
++ interrupt occurs. There is only one legal value for the
++ <varname>count</varname> parameter of
++ <function>read()</function>, and that is the size of a
++ signed 32 bit integer (4). Any other value for
++ <varname>count</varname> causes <function>read()</function>
++ to fail. The signed 32 bit integer read is the interrupt
++ count of your device. If the value is one more than the value
++ you read the last time, everything is OK. If the difference
++ is greater than one, you missed interrupts.
++ </para>
++ <para>
++ You can also use <function>select()</function> on
++ <filename>/dev/uioX</filename>.
++ </para>
++</sect1>
++
++</chapter>
++
++<appendix id="app1">
++<title>Further information</title>
++<itemizedlist>
++ <listitem><para>
++ <ulink url="http://www.osadl.org">
++ OSADL homepage.</ulink>
++ </para></listitem>
++ <listitem><para>
++ <ulink url="http://www.linutronix.de">
++ Linutronix homepage.</ulink>
++ </para></listitem>
++</itemizedlist>
++</appendix>
++
++</book>
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/patches.drivers/uio.patch new/patches.drivers/uio.patch
--- old/patches.drivers/uio.patch 1970-01-01 01:00:00.000000000 +0100
+++ new/patches.drivers/uio.patch 2007-07-21 05:20:05.000000000 +0200
@@ -0,0 +1,871 @@
+From hjk@linutronix.de Thu Dec 7 01:58:37 2006
+From: Hans J. Koch
+To: Greg KH
+Cc: tglx@linutronix.de, Benedikt Spranger
+Subject: UIO: Add the User IO core code
+Date: Thu, 7 Dec 2006 10:58:29 +0100
+Patch-mainline: 2.6.23
+
+From: Hans J. Koch
+
+This interface allows the ability to write the majority of a driver in
+userspace with only a very small shell of a driver in the kernel itself.
+It uses a char device and sysfs to interact with a userspace process to
+process interrupts and control memory accesses.
+
+See the docbook documentation for more details on how to use this
+interface.
+
+From: Hans J. Koch
+Cc: Thomas Gleixner
+Cc: Benedikt Spranger
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/Kconfig | 1
+ drivers/Makefile | 1
+ drivers/uio/Kconfig | 16 +
+ drivers/uio/Makefile | 1
+ drivers/uio/uio.c | 701 +++++++++++++++++++++++++++++++++++++++++++++
+ include/linux/uio_driver.h | 91 +++++
+ 6 files changed, 811 insertions(+)
+
+--- linux-2.6.22.orig/drivers/Kconfig
++++ linux-2.6.22/drivers/Kconfig
+@@ -84,4 +84,5 @@ source "drivers/auxdisplay/Kconfig"
+
+ source "drivers/kvm/Kconfig"
+
++source "drivers/uio/Kconfig"
+ endmenu
+--- linux-2.6.22.orig/drivers/Makefile
++++ linux-2.6.22/drivers/Makefile
+@@ -38,6 +38,7 @@ obj-$(CONFIG_ATA) += ata/
+ obj-$(CONFIG_FUSION) += message/
+ obj-$(CONFIG_FIREWIRE) += firewire/
+ obj-$(CONFIG_IEEE1394) += ieee1394/
++obj-$(CONFIG_UIO) += uio/
+ obj-y += cdrom/
+ obj-y += auxdisplay/
+ obj-$(CONFIG_MTD) += mtd/
+--- /dev/null
++++ linux-2.6.22/drivers/uio/Kconfig
+@@ -0,0 +1,16 @@
++menu "Userspace I/O"
++ depends on !S390
++
++config UIO
++ tristate "Userspace I/O drivers"
++ default n
++ help
++ Enable this to allow the userspace driver core code to be
++ built. This code allows userspace programs easy access to
++ kernel interrupts and memory locations, allowing some drivers
++ to be written in userspace. Note that a small kernel driver
++ is also required for interrupt handling to work properly.
++
++ If you don't know what to do here, say N.
++
++endmenu
+--- /dev/null
++++ linux-2.6.22/drivers/uio/Makefile
+@@ -0,0 +1 @@
++obj-$(CONFIG_UIO) += uio.o
+--- /dev/null
++++ linux-2.6.22/drivers/uio/uio.c
+@@ -0,0 +1,701 @@
++/*
++ * drivers/uio/uio.c
++ *
++ * Copyright(C) 2005, Benedikt Spranger
++ * Copyright(C) 2005, Thomas Gleixner
++ * Copyright(C) 2006, Hans J. Koch
++ * Copyright(C) 2006, Greg Kroah-Hartman
++ *
++ * Userspace IO
++ *
++ * Base Functions
++ *
++ * Licensed under the GPLv2 only.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/poll.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/idr.h>
++#include <linux/string.h>
++#include <linux/kobject.h>
++#include <linux/uio_driver.h>
++
++#define UIO_MAX_DEVICES 255
++
++struct uio_device {
++ struct module *owner;
++ struct device *dev;
++ int minor;
++ atomic_t event;
++ struct fasync_struct *async_queue;
++ wait_queue_head_t wait;
++ int vma_count;
++ struct uio_info *info;
++ struct kset map_attr_kset;
++};
++
++static int uio_major;
++static DEFINE_IDR(uio_idr);
++static struct file_operations uio_fops;
++
++/* UIO class infrastructure */
++static struct uio_class {
++ struct kref kref;
++ struct class *class;
++} *uio_class;
++
++/*
++ * attributes
++ */
++
++static struct attribute attr_addr = {
++ .name = "addr",
++ .mode = S_IRUGO,
++};
++
++static struct attribute attr_size = {
++ .name = "size",
++ .mode = S_IRUGO,
++};
++
++static struct attribute* map_attrs[] = {
++ &attr_addr, &attr_size, NULL
++};
++
++static ssize_t map_attr_show(struct kobject *kobj, struct attribute *attr,
++ char *buf)
++{
++ struct uio_mem *mem = container_of(kobj, struct uio_mem, kobj);
++
++ if (strncmp(attr->name,"addr",4) == 0)
++ return sprintf(buf, "0x%lx\n", mem->addr);
++
++ if (strncmp(attr->name,"size",4) == 0)
++ return sprintf(buf, "0x%lx\n", mem->size);
++
++ return -ENODEV;
++}
++
++static void map_attr_release(struct kobject *kobj)
++{
++ /* TODO ??? */
++}
++
++static struct sysfs_ops map_attr_ops = {
++ .show = map_attr_show,
++};
++
++static struct kobj_type map_attr_type = {
++ .release = map_attr_release,
++ .sysfs_ops = &map_attr_ops,
++ .default_attrs = map_attrs,
++};
++
++static ssize_t show_name(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct uio_device *idev = dev_get_drvdata(dev);
++ if (idev)
++ return sprintf(buf, "%s\n", idev->info->name);
++ else
++ return -ENODEV;
++}
++static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);
++
++static ssize_t show_version(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct uio_device *idev = dev_get_drvdata(dev);
++ if (idev)
++ return sprintf(buf, "%s\n", idev->info->version);
++ else
++ return -ENODEV;
++}
++static DEVICE_ATTR(version, S_IRUGO, show_version, NULL);
++
++static ssize_t show_event(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct uio_device *idev = dev_get_drvdata(dev);
++ if (idev)
++ return sprintf(buf, "%u\n",
++ (unsigned int)atomic_read(&idev->event));
++ else
++ return -ENODEV;
++}
++static DEVICE_ATTR(event, S_IRUGO, show_event, NULL);
++
++static struct attribute *uio_attrs[] = {
++ &dev_attr_name.attr,
++ &dev_attr_version.attr,
++ &dev_attr_event.attr,
++ NULL,
++};
++
++static struct attribute_group uio_attr_grp = {
++ .attrs = uio_attrs,
++};
++
++/*
++ * device functions
++ */
++static int uio_dev_add_attributes(struct uio_device *idev)
++{
++ int ret;
++ int mi;
++ int map_found = 0;
++ struct uio_mem *mem;
++
++ ret = sysfs_create_group(&idev->dev->kobj, &uio_attr_grp);
++ if (ret)
++ goto err_group;
++
++ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
++ mem = &idev->info->mem[mi];
++ if (mem->size == 0)
++ break;
++ if (!map_found) {
++ map_found = 1;
++ kobject_set_name(&idev->map_attr_kset.kobj,"maps");
++ idev->map_attr_kset.ktype = &map_attr_type;
++ idev->map_attr_kset.kobj.parent = &idev->dev->kobj;
++ ret = kset_register(&idev->map_attr_kset);
++ if (ret)
++ goto err_remove_group;
++ }
++ kobject_init(&mem->kobj);
++ kobject_set_name(&mem->kobj,"map%d",mi);
++ mem->kobj.parent = &idev->map_attr_kset.kobj;
++ mem->kobj.kset = &idev->map_attr_kset;
++ ret = kobject_add(&mem->kobj);
++ if (ret)
++ goto err_remove_maps;
++ }
++
++ return 0;
++
++err_remove_maps:
++ for (mi--; mi>=0; mi--) {
++ mem = &idev->info->mem[mi];
++ kobject_unregister(&mem->kobj);
++ }
++ kset_unregister(&idev->map_attr_kset); /* Needed ? */
++err_remove_group:
++ sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
++err_group:
++ dev_err(idev->dev, "error creating sysfs files (%d)\n", ret);
++ return ret;
++}
++
++static void uio_dev_del_attributes(struct uio_device *idev)
++{
++ int mi;
++ struct uio_mem *mem;
++ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
++ mem = &idev->info->mem[mi];
++ if (mem->size == 0)
++ break;
++ kobject_unregister(&mem->kobj);
++ }
++ kset_unregister(&idev->map_attr_kset);
++ sysfs_remove_group(&idev->dev->kobj, &uio_attr_grp);
++}
++
++static int uio_get_minor(struct uio_device *idev)
++{
++ static DEFINE_MUTEX(minor_lock);
++ int retval = -ENOMEM;
++ int id;
++
++ mutex_lock(&minor_lock);
++ if (idr_pre_get(&uio_idr, GFP_KERNEL) == 0)
++ goto exit;
++
++ retval = idr_get_new(&uio_idr, idev, &id);
++ if (retval < 0) {
++ if (retval == -EAGAIN)
++ retval = -ENOMEM;
++ goto exit;
++ }
++ idev->minor = id & MAX_ID_MASK;
++exit:
++ mutex_unlock(&minor_lock);
++ return retval;
++}
++
++static void uio_free_minor(struct uio_device *idev)
++{
++ idr_remove(&uio_idr, idev->minor);
++}
++
++/**
++ * uio_event_notify - trigger an interrupt event
++ * @info: UIO device capabilities
++ */
++void uio_event_notify(struct uio_info *info)
++{
++ struct uio_device *idev = info->uio_dev;
++
++ atomic_inc(&idev->event);
++ wake_up_interruptible(&idev->wait);
++ kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
++}
++EXPORT_SYMBOL_GPL(uio_event_notify);
++
++/**
++ * uio_interrupt - hardware interrupt handler
++ * @irq: IRQ number, can be UIO_IRQ_CYCLIC for cyclic timer
++ * @dev_id: Pointer to the devices uio_device structure
++ */
++static irqreturn_t uio_interrupt(int irq, void *dev_id)
++{
++ struct uio_device *idev = (struct uio_device *)dev_id;
++ irqreturn_t ret = idev->info->handler(irq, idev->info);
++
++ if (ret == IRQ_HANDLED)
++ uio_event_notify(idev->info);
++
++ return ret;
++}
++
++struct uio_listener {
++ struct uio_device *dev;
++ s32 event_count;
++};
++
++static int uio_open(struct inode *inode, struct file *filep)
++{
++ struct uio_device *idev;
++ struct uio_listener *listener;
++ int ret = 0;
++
++ idev = idr_find(&uio_idr, iminor(inode));
++ if (!idev)
++ return -ENODEV;
++
++ listener = kmalloc(sizeof(*listener), GFP_KERNEL);
++ if (!listener)
++ return -ENOMEM;
++
++ listener->dev = idev;
++ listener->event_count = atomic_read(&idev->event);
++ filep->private_data = listener;
++
++ if (idev->info->open) {
++ if (!try_module_get(idev->owner))
++ return -ENODEV;
++ ret = idev->info->open(idev->info, inode);
++ module_put(idev->owner);
++ }
++
++ if (ret)
++ kfree(listener);
++
++ return ret;
++}
++
++static int uio_fasync(int fd, struct file *filep, int on)
++{
++ struct uio_listener *listener = filep->private_data;
++ struct uio_device *idev = listener->dev;
++
++ return fasync_helper(fd, filep, on, &idev->async_queue);
++}
++
++static int uio_release(struct inode *inode, struct file *filep)
++{
++ int ret = 0;
++ struct uio_listener *listener = filep->private_data;
++ struct uio_device *idev = listener->dev;
++
++ if (idev->info->release) {
++ if (!try_module_get(idev->owner))
++ return -ENODEV;
++ ret = idev->info->release(idev->info, inode);
++ module_put(idev->owner);
++ }
++ if (filep->f_flags & FASYNC)
++ ret = uio_fasync(-1, filep, 0);
++ kfree(listener);
++ return ret;
++}
++
++static unsigned int uio_poll(struct file *filep, poll_table *wait)
++{
++ struct uio_listener *listener = filep->private_data;
++ struct uio_device *idev = listener->dev;
++
++ if (idev->info->irq == UIO_IRQ_NONE)
++ return -EIO;
++
++ poll_wait(filep, &idev->wait, wait);
++ if (listener->event_count != atomic_read(&idev->event))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static ssize_t uio_read(struct file *filep, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ struct uio_listener *listener = filep->private_data;
++ struct uio_device *idev = listener->dev;
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval;
++ s32 event_count;
++
++ if (idev->info->irq == UIO_IRQ_NONE)
++ return -EIO;
++
++ if (count != sizeof(s32))
++ return -EINVAL;
++
++ add_wait_queue(&idev->wait, &wait);
++
++ do {
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ event_count = atomic_read(&idev->event);
++ if (event_count != listener->event_count) {
++ if (copy_to_user(buf, &event_count, count))
++ retval = -EFAULT;
++ else {
++ listener->event_count = event_count;
++ retval = count;
++ }
++ break;
++ }
++
++ if (filep->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ break;
++ }
++
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ break;
++ }
++ schedule();
++ } while (1);
++
++ __set_current_state(TASK_RUNNING);
++ remove_wait_queue(&idev->wait, &wait);
++
++ return retval;
++}
++
++static int uio_find_mem_index(struct vm_area_struct *vma)
++{
++ int mi;
++ struct uio_device *idev = vma->vm_private_data;
++
++ for (mi = 0; mi < MAX_UIO_MAPS; mi++) {
++ if (idev->info->mem[mi].size == 0)
++ return -1;
++ if (vma->vm_pgoff == mi)
++ return mi;
++ }
++ return -1;
++}
++
++static void uio_vma_open(struct vm_area_struct *vma)
++{
++ struct uio_device *idev = vma->vm_private_data;
++ idev->vma_count++;
++}
++
++static void uio_vma_close(struct vm_area_struct *vma)
++{
++ struct uio_device *idev = vma->vm_private_data;
++ idev->vma_count--;
++}
++
++static struct page *uio_vma_nopage(struct vm_area_struct *vma,
++ unsigned long address, int *type)
++{
++ struct uio_device *idev = vma->vm_private_data;
++ struct page* page = NOPAGE_SIGBUS;
++
++ int mi = uio_find_mem_index(vma);
++ if (mi < 0)
++ return page;
++
++ if (idev->info->mem[mi].memtype == UIO_MEM_LOGICAL)
++ page = virt_to_page(idev->info->mem[mi].addr);
++ else
++ page = vmalloc_to_page((void*)idev->info->mem[mi].addr);
++ get_page(page);
++ if (type)
++ *type = VM_FAULT_MINOR;
++ return page;
++}
++
++static struct vm_operations_struct uio_vm_ops = {
++ .open = uio_vma_open,
++ .close = uio_vma_close,
++ .nopage = uio_vma_nopage,
++};
++
++static int uio_mmap_physical(struct vm_area_struct *vma)
++{
++ struct uio_device *idev = vma->vm_private_data;
++ int mi = uio_find_mem_index(vma);
++ if (mi < 0)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++
++ return remap_pfn_range(vma,
++ vma->vm_start,
++ idev->info->mem[mi].addr >> PAGE_SHIFT,
++ vma->vm_end - vma->vm_start,
++ vma->vm_page_prot);
++}
++
++static int uio_mmap_logical(struct vm_area_struct *vma)
++{
++ vma->vm_flags |= VM_RESERVED;
++ vma->vm_ops = &uio_vm_ops;
++ uio_vma_open(vma);
++ return 0;
++}
++
++static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
++{
++ struct uio_listener *listener = filep->private_data;
++ struct uio_device *idev = listener->dev;
++ int mi;
++ unsigned long requested_pages, actual_pages;
++ int ret = 0;
++
++ if (vma->vm_end < vma->vm_start)
++ return -EINVAL;
++
++ vma->vm_private_data = idev;
++
++ mi = uio_find_mem_index(vma);
++ if (mi < 0)
++ return -EINVAL;
++
++ requested_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ actual_pages = (idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
++ if (requested_pages > actual_pages)
++ return -EINVAL;
++
++ if (idev->info->mmap) {
++ if (!try_module_get(idev->owner))
++ return -ENODEV;
++ ret = idev->info->mmap(idev->info, vma);
++ module_put(idev->owner);
++ return ret;
++ }
++
++ switch (idev->info->mem[mi].memtype) {
++ case UIO_MEM_PHYS:
++ return uio_mmap_physical(vma);
++ case UIO_MEM_LOGICAL:
++ case UIO_MEM_VIRTUAL:
++ return uio_mmap_logical(vma);
++ default:
++ return -EINVAL;
++ }
++}
++
++static struct file_operations uio_fops = {
++ .owner = THIS_MODULE,
++ .open = uio_open,
++ .release = uio_release,
++ .read = uio_read,
++ .mmap = uio_mmap,
++ .poll = uio_poll,
++ .fasync = uio_fasync,
++};
++
++static int uio_major_init(void)
++{
++ uio_major = register_chrdev(0, "uio", &uio_fops);
++ if (uio_major < 0)
++ return uio_major;
++ return 0;
++}
++
++static void uio_major_cleanup(void)
++{
++ unregister_chrdev(uio_major, "uio");
++}
++
++static int init_uio_class(void)
++{
++ int ret = 0;
++
++ if (uio_class != NULL) {
++ kref_get(&uio_class->kref);
++ goto exit;
++ }
++
++ /* This is the first time in here, set everything up properly */
++ ret = uio_major_init();
++ if (ret)
++ goto exit;
++
++ uio_class = kzalloc(sizeof(*uio_class), GFP_KERNEL);
++ if (!uio_class) {
++ ret = -ENOMEM;
++ goto err_kzalloc;
++ }
++
++ kref_init(&uio_class->kref);
++ uio_class->class = class_create(THIS_MODULE, "uio");
++ if (IS_ERR(uio_class->class)) {
++ ret = IS_ERR(uio_class->class);
++ printk(KERN_ERR "class_create failed for uio\n");
++ goto err_class_create;
++ }
++ return 0;
++
++err_class_create:
++ kfree(uio_class);
++ uio_class = NULL;
++err_kzalloc:
++ uio_major_cleanup();
++exit:
++ return ret;
++}
++
++static void release_uio_class(struct kref *kref)
++{
++ /* Ok, we cheat as we know we only have one uio_class */
++ class_destroy(uio_class->class);
++ kfree(uio_class);
++ uio_major_cleanup();
++ uio_class = NULL;
++}
++
++static void uio_class_destroy(void)
++{
++ if (uio_class)
++ kref_put(&uio_class->kref, release_uio_class);
++}
++
++/**
++ * uio_register_device - register a new userspace IO device
++ * @owner: module that creates the new device
++ * @parent: parent device
++ * @info: UIO device capabilities
++ *
++ * returns zero on success or a negative error code.
++ */
++int __uio_register_device(struct module *owner,
++ struct device *parent,
++ struct uio_info *info)
++{
++ struct uio_device *idev;
++ int ret = 0;
++
++ if (!parent || !info || !info->name || !info->version)
++ return -EINVAL;
++
++ info->uio_dev = NULL;
++
++ ret = init_uio_class();
++ if (ret)
++ return ret;
++
++ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
++ if (!idev) {
++ ret = -ENOMEM;
++ goto err_kzalloc;
++ }
++
++ idev->owner = owner;
++ idev->info = info;
++ init_waitqueue_head(&idev->wait);
++ atomic_set(&idev->event, 0);
++
++ ret = uio_get_minor(idev);
++ if (ret)
++ goto err_get_minor;
++
++ idev->dev = device_create(uio_class->class, parent,
++ MKDEV(uio_major, idev->minor),
++ "uio%d", idev->minor);
++ if (IS_ERR(idev->dev)) {
++ printk(KERN_ERR "UIO: device register failed\n");
++ ret = PTR_ERR(idev->dev);
++ goto err_device_create;
++ }
++ dev_set_drvdata(idev->dev, idev);
++
++ ret = uio_dev_add_attributes(idev);
++ if (ret)
++ goto err_uio_dev_add_attributes;
++
++ info->uio_dev = idev;
++
++ if (idev->info->irq >= 0) {
++ ret = request_irq(idev->info->irq, uio_interrupt,
++ idev->info->irq_flags, idev->info->name, idev);
++ if (ret)
++ goto err_request_irq;
++ }
++
++ return 0;
++
++err_request_irq:
++ uio_dev_del_attributes(idev);
++err_uio_dev_add_attributes:
++ device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
++err_device_create:
++ uio_free_minor(idev);
++err_get_minor:
++ kfree(idev);
++err_kzalloc:
++ uio_class_destroy();
++ return ret;
++}
++EXPORT_SYMBOL_GPL(__uio_register_device);
++
++/**
++ * uio_unregister_device - unregister a userspace IO device
++ * @info: UIO device capabilities
++ *
++ */
++void uio_unregister_device(struct uio_info *info)
++{
++ struct uio_device *idev;
++
++ if (!info || !info->uio_dev)
++ return;
++
++ idev = info->uio_dev;
++
++ uio_free_minor(idev);
++
++ if (info->irq >= 0)
++ free_irq(info->irq, idev);
++
++ uio_dev_del_attributes(idev);
++
++ dev_set_drvdata(idev->dev, NULL);
++ device_destroy(uio_class->class, MKDEV(uio_major, idev->minor));
++ kfree(idev);
++ uio_class_destroy();
++
++ return;
++}
++EXPORT_SYMBOL_GPL(uio_unregister_device);
++
++static int __init uio_init(void)
++{
++ return 0;
++}
++
++static void __exit uio_exit(void)
++{
++}
++
++module_init(uio_init)
++module_exit(uio_exit)
++MODULE_LICENSE("GPL v2");
+--- /dev/null
++++ linux-2.6.22/include/linux/uio_driver.h
+@@ -0,0 +1,91 @@
++/*
++ * include/linux/uio_driver.h
++ *
++ * Copyright(C) 2005, Benedikt Spranger
++ * Copyright(C) 2005, Thomas Gleixner
++ * Copyright(C) 2006, Hans J. Koch
++ * Copyright(C) 2006, Greg Kroah-Hartman
++ *
++ * Userspace IO driver.
++ *
++ * Licensed under the GPLv2 only.
++ */
++
++#ifndef _UIO_DRIVER_H_
++#define _UIO_DRIVER_H_
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/interrupt.h>
++
++/**
++ * struct uio_mem - description of a UIO memory region
++ * @kobj: kobject for this mapping
++ * @addr: address of the device's memory
++ * @size: size of IO
++ * @memtype: type of memory addr points to
++ * @internal_addr: ioremap-ped version of addr, for driver internal use
++ */
++struct uio_mem {
++ struct kobject kobj;
++ unsigned long addr;
++ unsigned long size;
++ int memtype;
++ void __iomem *internal_addr;
++};
++
++#define MAX_UIO_MAPS 5
++
++struct uio_device;
++
++/**
++ * struct uio_info - UIO device capabilities
++ * @uio_dev: the UIO device this info belongs to
++ * @name: device name
++ * @version: device driver version
++ * @mem: list of mappable memory regions, size==0 for end of list
++ * @irq: interrupt number or UIO_IRQ_CUSTOM
++ * @irq_flags: flags for request_irq()
++ * @priv: optional private data
++ * @handler: the device's irq handler
++ * @mmap: mmap operation for this uio device
++ * @open: open operation for this uio device
++ * @release: release operation for this uio device
++ */
++struct uio_info {
++ struct uio_device *uio_dev;
++ char *name;
++ char *version;
++ struct uio_mem mem[MAX_UIO_MAPS];
++ long irq;
++ unsigned long irq_flags;
++ void *priv;
++ irqreturn_t (*handler)(int irq, struct uio_info *dev_info);
++ int (*mmap)(struct uio_info *info, struct vm_area_struct *vma);
++ int (*open)(struct uio_info *info, struct inode *inode);
++ int (*release)(struct uio_info *info, struct inode *inode);
++};
++
++extern int __must_check
++ __uio_register_device(struct module *owner,
++ struct device *parent,
++ struct uio_info *info);
++static inline int __must_check
++ uio_register_device(struct device *parent, struct uio_info *info)
++{
++ return __uio_register_device(THIS_MODULE, parent, info);
++}
++extern void uio_unregister_device(struct uio_info *info);
++extern void uio_event_notify(struct uio_info *info);
++
++/* defines for uio_device->irq */
++#define UIO_IRQ_CUSTOM -1
++#define UIO_IRQ_NONE -2
++
++/* defines for uio_device->memtype */
++#define UIO_MEM_NONE 0
++#define UIO_MEM_PHYS 1
++#define UIO_MEM_LOGICAL 2
++#define UIO_MEM_VIRTUAL 3
++
++#endif /* _UIO_DRIVER_H_ */
++++++ patches.rt.tar.bz2 ++++++
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/patches.rt/patch-2.6.22.1-rt4.openSUSE new/patches.rt/patch-2.6.22.1-rt4.openSUSE
--- old/patches.rt/patch-2.6.22.1-rt4.openSUSE 2007-07-17 10:48:23.000000000 +0200
+++ new/patches.rt/patch-2.6.22.1-rt4.openSUSE 2007-07-22 11:56:22.000000000 +0200
@@ -45373,8 +45373,8 @@
/*
* find_idlest_cpu - find the idlest cpu among the cpus in group.
*/
-@@ -1420,9 +1577,9 @@ static int sched_balance_self(int cpu, i
- return find_idlest_cpu_nodomain(t, cpu);
+@@ -1392,9 +1549,9 @@ static int sched_balance_self(int cpu, i
+ struct sched_domain *tmp, *sd = NULL;
for_each_domain(cpu, tmp) {
- /*
@@ -45386,7 +45386,7 @@
if (tmp->flags & SD_POWERSAVINGS_BALANCE)
break;
if (tmp->flags & flag)
-@@ -1532,7 +1689,8 @@ static inline int wake_idle(int cpu, str
+@@ -1504,7 +1661,8 @@ static inline int wake_idle(int cpu, str
*
* returns failure only if the task is already active.
*/
@@ -45396,7 +45396,7 @@
{
int cpu, this_cpu, success = 0;
unsigned long flags;
-@@ -1544,12 +1702,20 @@ static int try_to_wake_up(struct task_st
+@@ -1516,12 +1674,20 @@ static int try_to_wake_up(struct task_st
int new_cpu;
#endif
@@ -45418,7 +45418,7 @@
goto out_running;
cpu = task_cpu(p);
-@@ -1604,11 +1770,11 @@ static int try_to_wake_up(struct task_st
+@@ -1576,11 +1742,11 @@ static int try_to_wake_up(struct task_st
* of the current CPU:
*/
if (sync)
@@ -45432,7 +45432,7 @@
/*
* This domain has SD_WAKE_AFFINE and
* p is cache cold in this domain, and
-@@ -1642,34 +1808,52 @@ out_set_cpu:
+@@ -1614,34 +1780,52 @@ out_set_cpu:
old_state = p->state;
if (!(old_state & state))
goto out;
@@ -45502,7 +45502,7 @@
/*
* Sync wakeups (i.e. those types of wakeups where the waker
* has indicated that it will leave the CPU in short order)
-@@ -1678,14 +1862,22 @@ out_activate:
+@@ -1650,14 +1834,22 @@ out_activate:
* the waker guarantees that the freshly woken up task is going
* to be considered on this CPU.)
*/
@@ -45528,7 +45528,7 @@
out:
task_rq_unlock(rq, &flags);
-@@ -1694,29 +1886,79 @@ out:
+@@ -1666,29 +1858,79 @@ out:
int fastcall wake_up_process(struct task_struct *p)
{
@@ -45618,7 +45618,7 @@
/*
* We mark the process as running here, but have not actually
-@@ -1725,16 +1967,29 @@ void fastcall sched_fork(struct task_str
+@@ -1697,16 +1939,29 @@ void fastcall sched_fork(struct task_str
* event cannot wake it up and insert it on the runqueue either.
*/
p->state = TASK_RUNNING;
@@ -45651,7 +45651,7 @@
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
#if defined(CONFIG_SMP) && defined(__ARCH_WANT_UNLOCKED_CTXSW)
-@@ -1744,34 +1999,16 @@ void fastcall sched_fork(struct task_str
+@@ -1716,34 +1971,16 @@ void fastcall sched_fork(struct task_str
/* Want to start with kernel preemption disabled. */
task_thread_info(p)->preempt_count = 1;
#endif
@@ -45692,7 +45692,7 @@
* wake_up_new_task - wake up a newly created task for the first time.
*
* This function will do some initial scheduler statistics housekeeping
-@@ -1780,108 +2017,33 @@ void fastcall sched_fork(struct task_str
+@@ -1752,108 +1989,33 @@ void fastcall sched_fork(struct task_str
*/
void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
@@ -45819,7 +45819,7 @@
}
/**
-@@ -1936,16 +2098,32 @@ static inline void finish_task_switch(st
+@@ -1908,16 +2070,32 @@ static inline void finish_task_switch(st
* be dropped twice.
* Manfred Spraul
*/
@@ -45855,7 +45855,7 @@
kprobe_flush_task(prev);
put_task_struct(prev);
}
-@@ -1958,12 +2136,15 @@ static inline void finish_task_switch(st
+@@ -1930,12 +2108,15 @@ static inline void finish_task_switch(st
asmlinkage void schedule_tail(struct task_struct *prev)
__releases(rq->lock)
{
@@ -45874,7 +45874,7 @@
#endif
if (current->set_child_tid)
put_user(current->pid, current->set_child_tid);
-@@ -1973,13 +2154,15 @@ asmlinkage void schedule_tail(struct tas
+@@ -1945,13 +2126,15 @@ asmlinkage void schedule_tail(struct tas
* context_switch - switch to the new MM and the new
* thread's register state.
*/
@@ -45893,7 +45893,7 @@
/*
* For paravirt, this is coupled with an exit in switch_to to
* combine the page table reload and the switch backend into
-@@ -1987,16 +2170,15 @@ context_switch(struct rq *rq, struct tas
+@@ -1959,16 +2142,15 @@ context_switch(struct rq *rq, struct tas
*/
arch_enter_lazy_cpu_mode();
@@ -45912,7 +45912,7 @@
rq->prev_mm = oldmm;
}
/*
-@@ -2009,10 +2191,26 @@ context_switch(struct rq *rq, struct tas
+@@ -1981,10 +2163,26 @@ context_switch(struct rq *rq, struct tas
spin_release(&rq->lock.dep_map, 1, _THIS_IP_);
#endif
@@ -45940,7 +45940,7 @@
}
/*
-@@ -2049,6 +2247,43 @@ unsigned long nr_uninterruptible(void)
+@@ -2021,6 +2219,43 @@ unsigned long nr_uninterruptible(void)
return sum;
}
@@ -45984,7 +45984,7 @@
unsigned long long nr_context_switches(void)
{
int i;
-@@ -2085,17 +2320,65 @@ unsigned long nr_active(void)
+@@ -2057,17 +2292,65 @@ unsigned long nr_active(void)
return running + uninterruptible;
}
@@ -46056,7 +46056,7 @@
/*
* double_rq_lock - safely lock two runqueues
*
-@@ -2141,7 +2424,7 @@ static void double_rq_unlock(struct rq *
+@@ -2113,7 +2396,7 @@ static void double_rq_unlock(struct rq *
/*
* double_lock_balance - lock the busiest runqueue, this_rq is locked already.
*/
@@ -46065,7 +46065,7 @@
__releases(this_rq->lock)
__acquires(busiest->lock)
__acquires(this_rq->lock)
-@@ -2156,9 +2439,12 @@ static void double_lock_balance(struct r
+@@ -2128,9 +2411,12 @@ static void double_lock_balance(struct r
spin_unlock(&this_rq->lock);
spin_lock(&busiest->lock);
spin_lock(&this_rq->lock);
@@ -46078,7 +46078,7 @@
}
/*
-@@ -2212,23 +2498,17 @@ void sched_exec(void)
+@@ -2184,23 +2470,17 @@ void sched_exec(void)
* pull_task - move a task from a remote runqueue to the local runqueue.
* Both runqueues must be locked.
*/
@@ -46107,7 +46107,7 @@
}
/*
-@@ -2236,7 +2516,7 @@ static void pull_task(struct rq *src_rq,
+@@ -2208,7 +2488,7 @@ static void pull_task(struct rq *src_rq,
*/
static
int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu,
@@ -46116,7 +46116,7 @@
int *all_pinned)
{
/*
-@@ -2253,132 +2533,67 @@ int can_migrate_task(struct task_struct
+@@ -2225,132 +2505,67 @@ int can_migrate_task(struct task_struct
return 0;
/*
@@ -46279,7 +46279,7 @@
}
out:
/*
-@@ -2390,18 +2605,48 @@ out:
+@@ -2362,18 +2577,48 @@ out:
if (all_pinned)
*all_pinned = pinned;
@@ -46330,7 +46330,7 @@
{
struct sched_group *busiest = NULL, *this = NULL, *group = sd->groups;
unsigned long max_load, avg_load, total_load, this_load, total_pwr;
-@@ -2419,9 +2664,9 @@ find_busiest_group(struct sched_domain *
+@@ -2391,9 +2636,9 @@ find_busiest_group(struct sched_domain *
max_load = this_load = total_load = total_pwr = 0;
busiest_load_per_task = busiest_nr_running = 0;
this_load_per_task = this_nr_running = 0;
@@ -46342,7 +46342,7 @@
load_idx = sd->newidle_idx;
else
load_idx = sd->idle_idx;
-@@ -2465,7 +2710,7 @@ find_busiest_group(struct sched_domain *
+@@ -2437,7 +2682,7 @@ find_busiest_group(struct sched_domain *
avg_load += load;
sum_nr_running += rq->nr_running;
@@ -46351,7 +46351,7 @@
}
/*
-@@ -2505,8 +2750,9 @@ find_busiest_group(struct sched_domain *
+@@ -2477,8 +2722,9 @@ find_busiest_group(struct sched_domain *
* Busy processors will not participate in power savings
* balance.
*/
@@ -46363,7 +46363,7 @@
/*
* If the local group is idle or completely loaded
-@@ -2516,42 +2762,42 @@ find_busiest_group(struct sched_domain *
+@@ -2488,42 +2734,42 @@ find_busiest_group(struct sched_domain *
!this_nr_running))
power_savings_balance = 0;
@@ -46431,7 +46431,7 @@
}
group_next:
#endif
-@@ -2606,7 +2852,7 @@ group_next:
+@@ -2578,7 +2824,7 @@ group_next:
* a think about bumping its value to force at least one task to be
* moved
*/
@@ -46440,7 +46440,7 @@
unsigned long tmp, pwr_now, pwr_move;
unsigned int imbn;
-@@ -2620,7 +2866,8 @@ small_imbalance:
+@@ -2592,7 +2838,8 @@ small_imbalance:
} else
this_load_per_task = SCHED_LOAD_SCALE;
@@ -46450,7 +46450,7 @@
*imbalance = busiest_load_per_task;
return busiest;
}
-@@ -2667,7 +2914,7 @@ small_imbalance:
+@@ -2639,7 +2886,7 @@ small_imbalance:
out_balanced:
#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -46459,7 +46459,7 @@
goto ret;
if (this == group_leader && group_leader != group_min) {
-@@ -2684,7 +2931,7 @@ ret:
+@@ -2656,7 +2903,7 @@ ret:
* find_busiest_queue - find the busiest runqueue among the cpus in group.
*/
static struct rq *
@@ -46468,7 +46468,7 @@
unsigned long imbalance, cpumask_t *cpus)
{
struct rq *busiest = NULL, *rq;
-@@ -2692,17 +2939,19 @@ find_busiest_queue(struct sched_group *g
+@@ -2664,17 +2911,19 @@ find_busiest_queue(struct sched_group *g
int i;
for_each_cpu_mask(i, group->cpumask) {
@@ -46491,7 +46491,7 @@
busiest = rq;
}
}
-@@ -2726,7 +2975,7 @@ static inline unsigned long minus_1_or_z
+@@ -2698,7 +2947,7 @@ static inline unsigned long minus_1_or_z
* tasks if there is an imbalance.
*/
static int load_balance(int this_cpu, struct rq *this_rq,
@@ -46500,7 +46500,7 @@
int *balance)
{
int nr_moved, all_pinned = 0, active_balance = 0, sd_idle = 0;
-@@ -2739,10 +2988,10 @@ static int load_balance(int this_cpu, st
+@@ -2711,10 +2960,10 @@ static int load_balance(int this_cpu, st
/*
* When power savings policy is enabled for the parent domain, idle
* sibling can pick up load irrespective of busy siblings. In this case,
@@ -46514,7 +46514,7 @@
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
sd_idle = 1;
-@@ -2876,7 +3125,7 @@ out_one_pinned:
+@@ -2848,7 +3097,7 @@ out_one_pinned:
* Check this_cpu to ensure it is balanced within domain. Attempt to move
* tasks if there is an imbalance.
*
@@ -46523,7 +46523,7 @@
* this_rq is locked.
*/
static int
-@@ -2893,31 +3142,31 @@ load_balance_newidle(int this_cpu, struc
+@@ -2865,31 +3114,31 @@ load_balance_newidle(int this_cpu, struc
* When power savings policy is enabled for the parent domain, idle
* sibling can pick up load irrespective of busy siblings. In this case,
* let the state of idle sibling percolate up as IDLE, instead of
@@ -46562,7 +46562,7 @@
nr_moved = 0;
if (busiest->nr_running > 1) {
-@@ -2925,7 +3174,7 @@ redo:
+@@ -2897,7 +3146,7 @@ redo:
double_lock_balance(this_rq, busiest);
nr_moved = move_tasks(this_rq, this_cpu, busiest,
minus_1_or_zero(busiest->nr_running),
@@ -46571,7 +46571,7 @@
spin_unlock(&busiest->lock);
if (!nr_moved) {
-@@ -2936,7 +3185,7 @@ redo:
+@@ -2908,7 +3157,7 @@ redo:
}
if (!nr_moved) {
@@ -46580,7 +46580,7 @@
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
return -1;
-@@ -2946,7 +3195,7 @@ redo:
+@@ -2918,7 +3167,7 @@ redo:
return nr_moved;
out_balanced:
@@ -46589,7 +46589,7 @@
if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
!test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
return -1;
-@@ -2962,8 +3211,8 @@ out_balanced:
+@@ -2934,8 +3183,8 @@ out_balanced:
static void idle_balance(int this_cpu, struct rq *this_rq)
{
struct sched_domain *sd;
@@ -46600,7 +46600,7 @@
for_each_domain(this_cpu, sd) {
unsigned long interval;
-@@ -2982,12 +3231,13 @@ static void idle_balance(int this_cpu, s
+@@ -2954,12 +3203,13 @@ static void idle_balance(int this_cpu, s
if (pulled_task)
break;
}
@@ -46615,7 +46615,7 @@
}
/*
-@@ -3031,7 +3281,7 @@ static void active_load_balance(struct r
+@@ -3003,7 +3253,7 @@ static void active_load_balance(struct r
schedstat_inc(sd, alb_cnt);
if (move_tasks(target_rq, target_cpu, busiest_rq, 1,
@@ -46624,7 +46624,7 @@
NULL))
schedstat_inc(sd, alb_pushed);
else
-@@ -3040,32 +3290,6 @@ static void active_load_balance(struct r
+@@ -3012,32 +3262,6 @@ static void active_load_balance(struct r
spin_unlock(&target_rq->lock);
}
@@ -46657,7 +46657,7 @@
#ifdef CONFIG_NO_HZ
static struct {
atomic_t load_balancer;
-@@ -3148,7 +3372,7 @@ static DEFINE_SPINLOCK(balancing);
+@@ -3120,7 +3344,7 @@ static DEFINE_SPINLOCK(balancing);
*
* Balancing parameters are set up in arch_init_sched_domains.
*/
@@ -46666,7 +46666,7 @@
{
int balance = 1;
struct rq *rq = cpu_rq(cpu);
-@@ -3162,13 +3386,16 @@ static inline void rebalance_domains(int
+@@ -3134,13 +3358,16 @@ static inline void rebalance_domains(int
continue;
interval = sd->balance_interval;
@@ -46684,7 +46684,7 @@
if (sd->flags & SD_SERIALIZE) {
if (!spin_trylock(&balancing))
-@@ -3182,7 +3409,7 @@ static inline void rebalance_domains(int
+@@ -3154,7 +3381,7 @@ static inline void rebalance_domains(int
* longer idle, or one of our SMT siblings is
* not idle.
*/
@@ -46693,7 +46693,7 @@
}
sd->last_balance = jiffies;
}
-@@ -3210,9 +3437,10 @@ out:
+@@ -3182,9 +3409,10 @@ out:
*/
static void run_rebalance_domains(struct softirq_action *h)
{
@@ -46706,7 +46706,7 @@
rebalance_domains(local_cpu, idle);
-@@ -3255,9 +3483,8 @@ static void run_rebalance_domains(struct
+@@ -3227,9 +3455,8 @@ static void run_rebalance_domains(struct
* idle load balancing owner or decide to stop the periodic load balancing,
* if the whole system is idle.
*/
@@ -46717,7 +46717,7 @@
#ifdef CONFIG_NO_HZ
/*
* If we were in the nohz mode recently and busy at the current
-@@ -3309,68 +3536,58 @@ static inline void trigger_load_balance(
+@@ -3281,68 +3508,58 @@ static inline void trigger_load_balance(
if (time_after_eq(jiffies, rq->next_balance))
raise_softirq(SCHED_SOFTIRQ);
}
@@ -46817,7 +46817,7 @@
* Account user cpu time to a process.
* @p: the process that the cpu time gets accounted to
* @hardirq_offset: the offset to subtract from hardirq_count()
-@@ -3385,7 +3602,9 @@ void account_user_time(struct task_struc
+@@ -3357,7 +3574,9 @@ void account_user_time(struct task_struc
/* Add user time to cpustat. */
tmp = cputime_to_cputime64(cputime);
@@ -46828,7 +46828,7 @@
cpustat->nice = cputime64_add(cpustat->nice, tmp);
else
cpustat->user = cputime64_add(cpustat->user, tmp);
-@@ -3408,10 +3627,12 @@ void account_system_time(struct task_str
+@@ -3380,10 +3599,12 @@ void account_system_time(struct task_str
/* Add system time to cpustat. */
tmp = cputime_to_cputime64(cputime);
@@ -46843,7 +46843,7 @@
else if (p != rq->idle)
cpustat->system = cputime64_add(cpustat->system, tmp);
else if (atomic_read(&rq->nr_iowait) > 0)
-@@ -3443,81 +3664,6 @@ void account_steal_time(struct task_stru
+@@ -3415,81 +3636,6 @@ void account_steal_time(struct task_stru
cpustat->steal = cputime64_add(cpustat->steal, tmp);
}
@@ -46925,7 +46925,7 @@
/*
* This function gets called by the timer code, with HZ frequency.
* We call it with interrupts disabled.
-@@ -3527,232 +3673,211 @@ out_unlock:
+@@ -3499,232 +3645,211 @@ out_unlock:
*/
void scheduler_tick(void)
{
@@ -47310,7 +47310,7 @@
/*
* this is the entry point to schedule() from in-kernel preemption
* off of preempt_enable. Kernel preemptions off return from interrupt
-@@ -3765,6 +3890,8 @@ asmlinkage void __sched preempt_schedule
+@@ -3737,6 +3862,8 @@ asmlinkage void __sched preempt_schedule
struct task_struct *task = current;
int saved_lock_depth;
#endif
@@ -47319,7 +47319,7 @@
/*
* If there is a non-zero preempt_count or interrupts are disabled,
* we do not want to preempt the current task. Just return..
-@@ -3773,6 +3900,7 @@ asmlinkage void __sched preempt_schedule
+@@ -3745,6 +3872,7 @@ asmlinkage void __sched preempt_schedule
return;
need_resched:
@@ -47327,7 +47327,7 @@
add_preempt_count(PREEMPT_ACTIVE);
/*
* We keep the big kernel semaphore locked, but we
-@@ -3783,24 +3911,25 @@ need_resched:
+@@ -3755,24 +3883,25 @@ need_resched:
saved_lock_depth = task->lock_depth;
task->lock_depth = -1;
#endif
@@ -47361,7 +47361,7 @@
*/
asmlinkage void __sched preempt_schedule_irq(void)
{
-@@ -3809,10 +3938,18 @@ asmlinkage void __sched preempt_schedule
+@@ -3781,10 +3910,18 @@ asmlinkage void __sched preempt_schedule
struct task_struct *task = current;
int saved_lock_depth;
#endif
@@ -47382,7 +47382,7 @@
add_preempt_count(PREEMPT_ACTIVE);
/*
* We keep the big kernel semaphore locked, but we
-@@ -3823,17 +3960,17 @@ need_resched:
+@@ -3795,17 +3932,17 @@ need_resched:
saved_lock_depth = task->lock_depth;
task->lock_depth = -1;
#endif
@@ -47405,7 +47405,7 @@
goto need_resched;
}
-@@ -3842,7 +3979,8 @@ need_resched:
+@@ -3814,7 +3951,8 @@ need_resched:
int default_wake_function(wait_queue_t *curr, unsigned mode, int sync,
void *key)
{
@@ -47415,7 +47415,7 @@
}
EXPORT_SYMBOL(default_wake_function);
-@@ -3883,8 +4021,9 @@ void fastcall __wake_up(wait_queue_head_
+@@ -3855,8 +3993,9 @@ void fastcall __wake_up(wait_queue_head_
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
@@ -47426,7 +47426,7 @@
}
EXPORT_SYMBOL(__wake_up);
-@@ -3934,8 +4073,9 @@ void fastcall complete(struct completion
+@@ -3906,8 +4045,9 @@ void fastcall complete(struct completion
spin_lock_irqsave(&x->wait.lock, flags);
x->done++;
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
@@ -47437,7 +47437,7 @@
}
EXPORT_SYMBOL(complete);
-@@ -3946,11 +4086,18 @@ void fastcall complete_all(struct comple
+@@ -3918,11 +4058,18 @@ void fastcall complete_all(struct comple
spin_lock_irqsave(&x->wait.lock, flags);
x->done += UINT_MAX/2;
__wake_up_common(&x->wait, TASK_UNINTERRUPTIBLE | TASK_INTERRUPTIBLE,
@@ -47457,7 +47457,7 @@
void fastcall __sched wait_for_completion(struct completion *x)
{
might_sleep();
-@@ -4073,7 +4220,6 @@ out:
+@@ -4045,7 +4192,6 @@ out:
}
EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
@@ -47465,7 +47465,7 @@
#define SLEEP_ON_VAR \
unsigned long flags; \
wait_queue_t wait; \
-@@ -4157,29 +4303,52 @@ EXPORT_SYMBOL(sleep_on_timeout);
+@@ -4129,29 +4275,52 @@ EXPORT_SYMBOL(sleep_on_timeout);
*/
void rt_mutex_setprio(struct task_struct *p, int prio)
{
@@ -47531,7 +47531,7 @@
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
-@@ -4188,9 +4357,13 @@ void rt_mutex_setprio(struct task_struct
+@@ -4160,9 +4329,13 @@ void rt_mutex_setprio(struct task_struct
if (task_running(rq, p)) {
if (p->prio > oldprio)
resched_task(rq->curr);
@@ -47547,7 +47547,7 @@
task_rq_unlock(rq, &flags);
}
-@@ -4198,10 +4371,10 @@ void rt_mutex_setprio(struct task_struct
+@@ -4170,10 +4343,10 @@ void rt_mutex_setprio(struct task_struct
void set_user_nice(struct task_struct *p, long nice)
{
@@ -47560,7 +47560,7 @@
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
-@@ -4210,20 +4383,21 @@ void set_user_nice(struct task_struct *p
+@@ -4182,20 +4355,21 @@ void set_user_nice(struct task_struct *p
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
@@ -47588,7 +47588,7 @@
}
p->static_prio = NICE_TO_PRIO(nice);
-@@ -4232,9 +4406,9 @@ void set_user_nice(struct task_struct *p
+@@ -4204,9 +4378,9 @@ void set_user_nice(struct task_struct *p
p->prio = effective_prio(p);
delta = p->prio - old_prio;
@@ -47601,7 +47601,7 @@
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
-@@ -4354,20 +4528,29 @@ static inline struct task_struct *find_p
+@@ -4326,20 +4500,29 @@ static inline struct task_struct *find_p
}
/* Actually do priority change: must hold rq lock. */
@@ -47638,7 +47638,7 @@
set_load_weight(p);
}
-@@ -4382,8 +4565,7 @@ static void __setscheduler(struct task_s
+@@ -4354,8 +4537,7 @@ static void __setscheduler(struct task_s
int sched_setscheduler(struct task_struct *p, int policy,
struct sched_param *param)
{
@@ -47648,7 +47648,7 @@
unsigned long flags;
struct rq *rq;
-@@ -4394,27 +4576,27 @@ recheck:
+@@ -4366,27 +4548,27 @@ recheck:
if (policy < 0)
policy = oldpolicy = p->policy;
else if (policy != SCHED_FIFO && policy != SCHED_RR &&
@@ -47682,7 +47682,7 @@
if (!lock_task_sighand(p, &flags))
return -ESRCH;
-@@ -4430,6 +4612,12 @@ recheck:
+@@ -4402,6 +4584,12 @@ recheck:
param->sched_priority > rlim_rtprio)
return -EPERM;
}
@@ -47695,7 +47695,7 @@
/* can't change other user's priorities */
if ((current->euid != p->euid) &&
-@@ -4457,13 +4645,13 @@ recheck:
+@@ -4429,13 +4617,13 @@ recheck:
spin_unlock_irqrestore(&p->pi_lock, flags);
goto recheck;
}
@@ -47715,7 +47715,7 @@
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
-@@ -4472,8 +4660,9 @@ recheck:
+@@ -4444,8 +4632,9 @@ recheck:
if (task_running(rq, p)) {
if (p->prio > oldprio)
resched_task(rq->curr);
@@ -47727,7 +47727,7 @@
}
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
-@@ -4745,59 +4934,37 @@ asmlinkage long sys_sched_getaffinity(pi
+@@ -4717,59 +4906,37 @@ asmlinkage long sys_sched_getaffinity(pi
/**
* sys_sched_yield - yield the current processor to other threads.
*
@@ -47799,7 +47799,7 @@
__might_sleep(__FILE__, __LINE__);
#endif
/*
-@@ -4806,10 +4973,11 @@ static void __cond_resched(void)
+@@ -4778,10 +4945,11 @@ static void __cond_resched(void)
* cond_resched() call.
*/
do {
@@ -47813,7 +47813,7 @@
}
int __sched cond_resched(void)
-@@ -4831,32 +4999,53 @@ EXPORT_SYMBOL(cond_resched);
+@@ -4803,32 +4971,53 @@ EXPORT_SYMBOL(cond_resched);
* operations here to prevent schedule() from being called twice (once via
* spin_unlock(), once by hand).
*/
@@ -47875,7 +47875,7 @@
if (need_resched() && system_state == SYSTEM_RUNNING) {
local_bh_enable();
__cond_resched();
-@@ -4867,17 +5056,101 @@ int __sched cond_resched_softirq(void)
+@@ -4839,17 +5028,101 @@ int __sched cond_resched_softirq(void)
}
EXPORT_SYMBOL(cond_resched_softirq);
@@ -47978,7 +47978,7 @@
EXPORT_SYMBOL(yield);
/*
-@@ -4930,6 +5203,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4902,6 +5175,8 @@ asmlinkage long sys_sched_get_priority_m
break;
case SCHED_NORMAL:
case SCHED_BATCH:
@@ -47987,7 +47987,7 @@
ret = 0;
break;
}
-@@ -4954,6 +5229,8 @@ asmlinkage long sys_sched_get_priority_m
+@@ -4926,6 +5201,8 @@ asmlinkage long sys_sched_get_priority_m
break;
case SCHED_NORMAL:
case SCHED_BATCH:
@@ -47996,7 +47996,7 @@
ret = 0;
}
return ret;
-@@ -4988,7 +5265,7 @@ long sys_sched_rr_get_interval(pid_t pid
+@@ -4960,7 +5237,7 @@ long sys_sched_rr_get_interval(pid_t pid
goto out_unlock;
jiffies_to_timespec(p->policy == SCHED_FIFO ?
@@ -48005,7 +48005,7 @@
read_unlock(&tasklist_lock);
retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0;
out_nounlock:
-@@ -4998,7 +5275,7 @@ out_unlock:
+@@ -4970,7 +5247,7 @@ out_unlock:
return retval;
}
@@ -48014,7 +48014,7 @@
static void show_task(struct task_struct *p)
{
-@@ -5006,19 +5283,23 @@ static void show_task(struct task_struct
+@@ -4978,19 +5255,23 @@ static void show_task(struct task_struct
unsigned state;
state = p->state ? __ffs(p->state) + 1 : 0;
@@ -48042,7 +48042,7 @@
#ifdef CONFIG_DEBUG_STACK_USAGE
{
unsigned long *n = end_of_stack(p);
-@@ -5027,11 +5308,7 @@ static void show_task(struct task_struct
+@@ -4999,11 +5280,7 @@ static void show_task(struct task_struct
free = (unsigned long)n - (unsigned long)end_of_stack(p);
}
#endif
@@ -48055,7 +48055,7 @@
if (state != TASK_RUNNING)
show_stack(p, NULL);
-@@ -5040,17 +5317,25 @@ static void show_task(struct task_struct
+@@ -5012,17 +5289,25 @@ static void show_task(struct task_struct
void show_state_filter(unsigned long state_filter)
{
struct task_struct *g, *p;
@@ -48088,7 +48088,7 @@
do_each_thread(g, p) {
/*
* reset the NMI-timeout, listing all files on a slow
-@@ -5063,12 +5348,22 @@ void show_state_filter(unsigned long sta
+@@ -5035,12 +5320,22 @@ void show_state_filter(unsigned long sta
touch_all_softlockup_watchdogs();
@@ -48112,7 +48112,7 @@
}
/**
-@@ -5084,13 +5379,12 @@ void __cpuinit init_idle(struct task_str
+@@ -5056,13 +5351,12 @@ void __cpuinit init_idle(struct task_str
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
@@ -48130,7 +48130,7 @@
spin_lock_irqsave(&rq->lock, flags);
rq->curr = rq->idle = idle;
-@@ -5100,11 +5394,17 @@ void __cpuinit init_idle(struct task_str
+@@ -5072,11 +5366,17 @@ void __cpuinit init_idle(struct task_str
spin_unlock_irqrestore(&rq->lock, flags);
/* Set the preempt count _outside_ the spinlocks! */
@@ -48149,7 +48149,7 @@
}
/*
-@@ -5116,6 +5416,28 @@ void __cpuinit init_idle(struct task_str
+@@ -5088,6 +5388,28 @@ void __cpuinit init_idle(struct task_str
*/
cpumask_t nohz_cpu_mask = CPU_MASK_NONE;
@@ -48178,7 +48178,7 @@
#ifdef CONFIG_SMP
/*
* This is how migration works:
-@@ -5189,11 +5511,18 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
+@@ -5161,11 +5483,18 @@ EXPORT_SYMBOL_GPL(set_cpus_allowed);
static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
struct rq *rq_dest, *rq_src;
@@ -48198,7 +48198,7 @@
rq_src = cpu_rq(src_cpu);
rq_dest = cpu_rq(dest_cpu);
-@@ -5205,24 +5534,20 @@ static int __migrate_task(struct task_st
+@@ -5177,24 +5506,20 @@ static int __migrate_task(struct task_st
if (!cpu_isset(dest_cpu, p->cpus_allowed))
goto out;
@@ -48232,7 +48232,7 @@
return ret;
}
-@@ -5370,7 +5695,8 @@ static void migrate_live_tasks(int src_c
+@@ -5342,7 +5667,8 @@ static void migrate_live_tasks(int src_c
write_unlock_irq(&tasklist_lock);
}
@@ -48242,7 +48242,7 @@
* It does so by boosting its priority to highest possible and adding it to
* the _front_ of the runqueue. Used by CPU offline code.
*/
-@@ -5390,10 +5716,10 @@ void sched_idle_next(void)
+@@ -5362,10 +5688,10 @@ void sched_idle_next(void)
*/
spin_lock_irqsave(&rq->lock, flags);
@@ -48255,7 +48255,7 @@
spin_unlock_irqrestore(&rq->lock, flags);
}
-@@ -5443,16 +5769,15 @@ static void migrate_dead(unsigned int de
+@@ -5415,16 +5741,15 @@ static void migrate_dead(unsigned int de
static void migrate_dead_tasks(unsigned int dead_cpu)
{
struct rq *rq = cpu_rq(dead_cpu);
@@ -48280,7 +48280,7 @@
}
}
#endif /* CONFIG_HOTPLUG_CPU */
-@@ -5483,7 +5808,7 @@ migration_call(struct notifier_block *nf
+@@ -5455,7 +5780,7 @@ migration_call(struct notifier_block *nf
kthread_bind(p, cpu);
/* Must be high prio: stop_machine expects to yield to it. */
rq = task_rq_lock(p, &flags);
@@ -48289,7 +48289,7 @@
task_rq_unlock(rq, &flags);
cpu_rq(cpu)->migration_thread = p;
break;
-@@ -5514,9 +5839,10 @@ migration_call(struct notifier_block *nf
+@@ -5486,9 +5811,10 @@ migration_call(struct notifier_block *nf
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
rq = task_rq_lock(rq->idle, &flags);
@@ -48302,7 +48302,7 @@
migrate_dead_tasks(cpu);
task_rq_unlock(rq, &flags);
migrate_nr_uninterruptible(rq);
-@@ -5825,483 +6151,6 @@ init_sched_build_groups(cpumask_t span,
+@@ -5797,483 +6123,6 @@ init_sched_build_groups(cpumask_t span,
#define SD_NODES_PER_DOMAIN 16
@@ -48786,7 +48786,7 @@
#ifdef CONFIG_NUMA
/**
-@@ -6602,7 +6451,6 @@ static void init_sched_groups_power(int
+@@ -6574,7 +6423,6 @@ static void init_sched_groups_power(int
static int build_sched_domains(const cpumask_t *cpu_map)
{
int i;
@@ -48794,7 +48794,7 @@
#ifdef CONFIG_NUMA
struct sched_group **sched_group_nodes = NULL;
int sd_allnodes = 0;
-@@ -6747,6 +6595,7 @@ static int build_sched_domains(const cpu
+@@ -6719,6 +6567,7 @@ static int build_sched_domains(const cpu
sched_group_nodes[i] = sg;
for_each_cpu_mask(j, nodemask) {
struct sched_domain *sd;
@@ -48802,7 +48802,7 @@
sd = &per_cpu(node_domains, j);
sd->groups = sg;
}
-@@ -6791,19 +6640,22 @@ static int build_sched_domains(const cpu
+@@ -6763,19 +6612,22 @@ static int build_sched_domains(const cpu
/* Calculate CPU power for physical packages and nodes */
#ifdef CONFIG_SCHED_SMT
for_each_cpu_mask(i, *cpu_map) {
@@ -48828,7 +48828,7 @@
init_sched_groups_power(i, sd);
}
-@@ -6831,10 +6683,6 @@ static int build_sched_domains(const cpu
+@@ -6803,10 +6655,6 @@ static int build_sched_domains(const cpu
#endif
cpu_attach_domain(sd, i);
}
@@ -48839,7 +48839,7 @@
return 0;
-@@ -7041,10 +6889,12 @@ void __init sched_init_smp(void)
+@@ -7013,10 +6861,12 @@ void __init sched_init_smp(void)
/* Move init over to a non-isolated CPU */
if (set_cpus_allowed(current, non_isolated_cpus) < 0)
BUG();
@@ -48852,7 +48852,7 @@
}
#endif /* CONFIG_SMP */
-@@ -7058,10 +6908,27 @@ int in_sched_functions(unsigned long add
+@@ -7030,10 +6880,27 @@ int in_sched_functions(unsigned long add
&& addr < (unsigned long)__sched_text_end);
}
@@ -48881,7 +48881,7 @@
for_each_possible_cpu(i) {
struct prio_array *array;
-@@ -7071,15 +6938,21 @@ void __init sched_init(void)
+@@ -7043,15 +6910,21 @@ void __init sched_init(void)
spin_lock_init(&rq->lock);
lockdep_set_class(&rq->lock, &rq->rq_lock_key);
rq->nr_running = 0;
@@ -48908,7 +48908,7 @@
rq->push_cpu = 0;
rq->cpu = i;
rq->migration_thread = NULL;
-@@ -7087,16 +6960,14 @@ void __init sched_init(void)
+@@ -7059,16 +6932,14 @@ void __init sched_init(void)
#endif
atomic_set(&rq->nr_iowait, 0);
@@ -48931,7 +48931,7 @@
}
set_load_weight(&init_task);
-@@ -7116,6 +6987,9 @@ void __init sched_init(void)
+@@ -7088,6 +6959,9 @@ void __init sched_init(void)
atomic_inc(&init_mm.mm_count);
enter_lazy_tlb(&init_mm, current);
@@ -48941,7 +48941,7 @@
/*
* Make us the idle thread. Technically, schedule() should not be
* called from this thread, however somewhere below it might be,
-@@ -7123,9 +6997,13 @@ void __init sched_init(void)
+@@ -7095,9 +6969,13 @@ void __init sched_init(void)
* when this runqueue becomes "idle".
*/
init_idle(current, smp_processor_id());
@@ -48956,7 +48956,7 @@
void __might_sleep(char *file, int line)
{
#ifdef in_atomic
-@@ -7133,13 +7011,17 @@ void __might_sleep(char *file, int line)
+@@ -7105,13 +6983,17 @@ void __might_sleep(char *file, int line)
if ((in_atomic() || irqs_disabled()) &&
system_state == SYSTEM_RUNNING && !oops_in_progress) {
@@ -48977,7 +48977,7 @@
debug_show_held_locks(current);
if (irqs_disabled())
print_irqtrace_events(current);
-@@ -7153,29 +7035,55 @@ EXPORT_SYMBOL(__might_sleep);
+@@ -7125,29 +7007,55 @@ EXPORT_SYMBOL(__might_sleep);
#ifdef CONFIG_MAGIC_SYSRQ
void normalize_rt_tasks(void)
{
@@ -49043,7 +49043,7 @@
__task_rq_unlock(rq);
spin_unlock_irqrestore(&p->pi_lock, flags);
} while_each_thread(g, p);
-@@ -7229,6 +7137,25 @@ void set_curr_task(int cpu, struct task_
+@@ -7201,6 +7109,25 @@ void set_curr_task(int cpu, struct task_
#endif
@@ -52720,7 +52720,7 @@
/* The default sysctl tables: */
-@@ -202,14 +207,110 @@ static ctl_table root_table[] = {
+@@ -202,12 +207,108 @@ static ctl_table root_table[] = {
.mode = 0555,
.child = dev_table,
},
@@ -52748,12 +52748,10 @@
{ .ctl_name = 0 }
};
- extern int affinity_load_balancing;
-
-+static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
-+static unsigned long max_sched_granularity_ns = 1000000000; /* 1 second */
-+static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
-+static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
++static unsigned long min_sched_granularity_ns = 100000; /* 100 usecs */
++static unsigned long max_sched_granularity_ns = 1000000000; /* 1 second */
++static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
++static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
+
static ctl_table kern_table[] = {
{
@@ -52831,7 +52829,7 @@
.ctl_name = KERN_PANIC,
.procname = "panic",
.data = &panic_timeout,
-@@ -217,6 +318,208 @@ static ctl_table kern_table[] = {
+@@ -215,6 +316,208 @@ static ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = &proc_dointvec,
},
diff -urN --exclude=CVS --exclude=.cvsignore --exclude=.svn --exclude=.svnignore old/patches.rt/preempt_sched_hooks.patch new/patches.rt/preempt_sched_hooks.patch
--- old/patches.rt/preempt_sched_hooks.patch 1970-01-01 01:00:00.000000000 +0100
+++ new/patches.rt/preempt_sched_hooks.patch 2007-07-22 01:42:10.000000000 +0200
@@ -0,0 +1,250 @@
+From kvm-devel-bounces@lists.sourceforge.net Wed Jul 11 09:55:36 2007
+Return-path:
+Received: from AMPHION.novell.com ([130.57.1.22]) by
+ lucius.provo.novell.com with ESMTP; Wed, 11 Jul 2007 09:55:36 -0600
+Received: from lists-outbound.sourceforge.net
+ (lists-outbound.sourceforge.net [66.35.250.225]) by AMPHION.novell.com (TMA
+ SMTPRS 4.4.568.30) with ESMTP id for
+ ; Wed, 11 Jul 2007 09:55:10 -0600
+Received-SPF: pass (AMPHION.novell.com: domain of
+ kvm-devel-bounces@lists.sourceforge.net designates 66.35.250.225 as
+ permitted sender)
+ receiver=AMPHION.novell.com;client-ip=66.35.250.225;envelope-from=;helo=lists-outbound.sourceforge.net;
+X-IADB2: None
+X-Bonded-Senders: None
+X-Habeas: None
+X-Cloudmark-Rating: 0 <127.0.50.0>
+X-IDDB: None
+X-Modus-ReverseDNS: OK
+X-Modus-BlackList:
+ 66.35.250.225=OK;kvm-devel-bounces@lists.sourceforge.net=OK
+X-Modus-RBL: 66.35.250.225=OK
+X-Modus-Trusted: 66.35.250.225=NO
+X-Modus-Audit: FALSE;0;0;0
+Received: from sc8-sf-list2-new.sourceforge.net
+ (sc8-sf-list2-new-b.sourceforge.net [10.3.1.94]) by
+ sc8-sf-spam2.sourceforge.net (Postfix) with ESMTP id 728151268B; Wed, 11
+ Jul 2007 08:55:33 -0700 (PDT)
+Received: from sc8-sf-mx2-b.sourceforge.net ([10.3.1.92]
+ helo=mail.sourceforge.net) by sc8-sf-list2-new.sourceforge.net with esmtp
+ (Exim 4.43) id 1I8eXH-0001ii-Lx for kvm-devel@lists.sourceforge.net; Wed,
+ 11 Jul 2007 08:55:27 -0700
+Received: from il.qumranet.com ([82.166.9.18]) by mail.sourceforge.net with
+ esmtp (Exim 4.44) id 1I8eXK-0005IU-Np for kvm-devel@lists.sourceforge.net;
+ Wed, 11 Jul 2007 08:55:31 -0700
+Received: by il.qumranet.com (Postfix, from userid 500) id 5A4252508FB;
+ Wed, 11 Jul 2007 18:55:33 +0300 (IDT)
+From: Avi Kivity
+To: kvm-devel@lists.sourceforge.net
+Date: Wed, 11 Jul 2007 18:55:32 +0300
+X-Mailer: git-send-email 1.5.2.3
+In-Reply-To: <11841693332609-git-send-email-avi@qumranet.com>
+References: <11841693332609-git-send-email-avi@qumranet.com>
+X-Spam-Score: 0.0 (/)
+X-Spam-Report: Spam Filtering performed by sourceforge.net. See
+ http://spamassassin.org/tag/ for more details. Report problems to
+ http://sf.net/tracker/?func=add&group_id=1&atid=200001
+Cc: linux-kernel@vger.kernel.org
+Subject: [kvm-devel] [PATCH 1/2] SCHED: Generic hooks for trapping task
+ preemption
+X-BeenThere: kvm-devel@lists.sourceforge.net
+X-Mailman-Version: 2.1.8
+Precedence: list
+List-Id: kernel virtual machine development
+
+List-Unsubscribe: https://lists.sourceforge.net/lists/listinfo/kvm-devel,
+ mailto:kvm-devel-request@lists.sourceforge.net?subject=unsubscribe
+List-Archive:
+ http://sourceforge.net/mailarchive/forum.php?forum_name=kvm-devel
+List-Post: mailto:kvm-devel@lists.sourceforge.net
+List-Help: mailto:kvm-devel-request@lists.sourceforge.net?subject=help
+List-Subscribe: https://lists.sourceforge.net/lists/listinfo/kvm-devel,
+ mailto:kvm-devel-request@lists.sourceforge.net?subject=subscribe
+MIME-Version: 1.0
+Content-Type: text/plain; charset="us-ascii"
+Sender: kvm-devel-bounces@lists.sourceforge.net
+Errors-To: kvm-devel-bounces@lists.sourceforge.net
+Message-Id: <4694C554.WALTHAM.WAL-1.100.1696C67.1.13BD4.1@1:7.WALTHAM.WAL-1.100.0.1.0.1@16>
+X-Evolution-Source: groupwise://ghaskins@wal-1.novell.com/
+Content-Transfer-Encoding: 8bit
+
+This adds a general mechanism whereby a task can request the scheduler to
+notify it whenever it is preempted or scheduled back in. This allows the
+task to swap any special-purpose registers like the fpu or Intel's VT
+registers.
+
+Signed-off-by: Avi Kivity
+---
+ include/linux/preempt.h | 27 +++++++++++++++++++++++
+ include/linux/sched.h | 4 +++
+ kernel/Kconfig.preempt | 4 +++
+ kernel/sched.c | 54 +++++++++++++++++++++++++++++++++++++++++++++++
+ 4 files changed, 89 insertions(+), 0 deletions(-)
+
+Index: linux-2.6.22/include/linux/preempt.h
+===================================================================
+--- linux-2.6.22.orig/include/linux/preempt.h
++++ linux-2.6.22/include/linux/preempt.h
+@@ -9,6 +9,7 @@
+ #include
+ #include
+ #include
++#include
+
+ #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_CRITICAL_TIMING)
+ extern void notrace add_preempt_count(unsigned int val);
+@@ -93,4 +94,30 @@ do { \
+
+ #endif
+
++#ifdef CONFIG_PREEMPT_HOOKS
++
++struct preempt_hook;
++
++struct preempt_ops {
++ void (*sched_in)(struct preempt_hook *hook, int cpu);
++ void (*sched_out)(struct preempt_hook *hook);
++};
++
++struct preempt_hook {
++ struct hlist_node link;
++ struct preempt_ops *ops;
++};
++
++void preempt_hook_register(struct preempt_hook *hook);
++void preempt_hook_unregister(struct preempt_hook *hook);
++
++static inline void preempt_hook_init(struct preempt_hook *hook,
++ struct preempt_ops *ops)
++{
++ INIT_HLIST_NODE(&hook->link);
++ hook->ops = ops;
++}
++
++#endif
++
+ #endif /* __LINUX_PREEMPT_H */
+Index: linux-2.6.22/include/linux/sched.h
+===================================================================
+--- linux-2.6.22.orig/include/linux/sched.h
++++ linux-2.6.22/include/linux/sched.h
+@@ -1070,6 +1070,10 @@ struct task_struct {
+ struct list_head run_list;
+ struct sched_entity se;
+
++#ifdef CONFIG_PREEMPT_HOOKS
++ struct hlist_head preempt_hooks; /* list of struct preempt_hook */
++#endif
++
+ unsigned short ioprio;
+ #ifdef CONFIG_BLK_DEV_IO_TRACE
+ unsigned int btrace_seq;
+Index: linux-2.6.22/kernel/Kconfig.preempt
+===================================================================
+--- linux-2.6.22.orig/kernel/Kconfig.preempt
++++ linux-2.6.22/kernel/Kconfig.preempt
+@@ -133,6 +133,11 @@ config SPINLOCK_BKL
+ Say Y here if you are building a kernel for a desktop system.
+ Say N if you are unsure.
+
++config PREEMPT_HOOKS
++ bool
++ depends on X86
++ default y
++
+ config PREEMPT_BKL
+ bool
+ depends on PREEMPT_RT || !SPINLOCK_BKL
+Index: linux-2.6.22/kernel/sched.c
+===================================================================
+--- linux-2.6.22.orig/kernel/sched.c
++++ linux-2.6.22/kernel/sched.c
+@@ -1960,6 +1960,10 @@ static void __sched_fork(struct task_str
+ INIT_LIST_HEAD(&p->run_list);
+ p->se.on_rq = 0;
+
++#ifdef CONFIG_PREEMPT_HOOKS
++ INIT_HLIST_HEAD(&p->preempt_hooks);
++#endif
++
+ /*
+ * We mark the process as running here, but have not actually
+ * inserted it onto the runqueue yet. This guarantees that
+@@ -2046,6 +2050,50 @@ void sched_dead(struct task_struct *p)
+ WARN_ON_ONCE(p->se.on_rq);
+ }
+
++#ifdef CONFIG_PREEMPT_HOOKS
++
++void preempt_hook_register(struct preempt_hook *hook)
++{
++	hlist_add_head(&hook->link, &current->preempt_hooks);
++}
++EXPORT_SYMBOL_GPL(preempt_hook_register);
++
++void preempt_hook_unregister(struct preempt_hook *hook)
++{
++ hlist_del(&hook->link);
++}
++EXPORT_SYMBOL_GPL(preempt_hook_unregister);
++
++static void fire_sched_in_preempt_hooks(struct task_struct *tsk)
++{
++ struct preempt_hook *hook;
++ struct hlist_node *node;
++
++ hlist_for_each_entry(hook, node, &tsk->preempt_hooks, link)
++ hook->ops->sched_in(hook, raw_smp_processor_id());
++}
++
++static void fire_sched_out_preempt_hooks(struct task_struct *tsk)
++{
++ struct preempt_hook *hook;
++ struct hlist_node *node;
++
++ hlist_for_each_entry(hook, node, &tsk->preempt_hooks, link)
++ hook->ops->sched_out(hook);
++}
++
++#else
++
++static void fire_sched_in_preempt_hooks(struct task_struct *tsk)
++{
++}
++
++static void fire_sched_out_preempt_hooks(struct task_struct *tsk)
++{
++}
++
++#endif
++
+ /**
+ * prepare_task_switch - prepare to switch tasks
+ * @rq: the runqueue preparing to switch
+@@ -2060,6 +2108,7 @@ void sched_dead(struct task_struct *p)
+ */
+ static inline void prepare_task_switch(struct rq *rq, struct task_struct *next)
+ {
++ fire_sched_out_preempt_hooks(current);
+ prepare_lock_switch(rq, next);
+ prepare_arch_switch(next);
+ }
+@@ -2111,6 +2160,7 @@ static inline void finish_task_switch(st
+ prev_state = prev->state;
+ _finish_arch_switch(prev);
+ finish_lock_switch(rq, prev);
++ fire_sched_in_preempt_hooks(current);
+ trace_stop_sched_switched(current);
+ /*
+ * Delay the final freeing of the mm or task, so that we dont have
+@@ -6972,6 +7022,10 @@ void __init sched_init(void)
+
+ set_load_weight(&init_task);
+
++#ifdef CONFIG_PREEMPT_HOOKS
++ INIT_HLIST_HEAD(&init_task.preempt_hooks);
++#endif
++
+ #ifdef CONFIG_SMP
+ nr_cpu_ids = highest_cpu + 1;
+ open_softirq(SCHED_SOFTIRQ, run_rebalance_domains, NULL);
++++++ series.conf ++++++
--- kernel-source/series.conf 2007-07-21 18:09:13.000000000 +0200
+++ /mounts/work_src_done/STABLE/kernel-source/series.conf 2007-07-22 11:56:35.000000000 +0200
@@ -361,6 +361,8 @@
# Other drivers we have added to the tree
########################################################
patches.drivers/nozomi.patch
+ patches.drivers/uio.patch
+ patches.drivers/uio-documentation.patch
########################################################
# Suspend/Resume stuff
@@ -538,7 +540,7 @@
patches.suse/filp-slab-rcu
patches.suse/ext2-fsync-err
patches.fixes/remount-no-shrink-dcache
- patches.suse/sysctl-add-affinity_load_balancing
+-rt patches.suse/sysctl-add-affinity_load_balancing
patches.fixes/loop-barriers
patches.fixes/loop-barriers2
@@ -653,6 +655,7 @@
# RT
+rt patches.rt/patch-2.6.22.1-rt4.openSUSE
++rt patches.rt/preempt_sched_hooks.patch
+rt patches.rt/redeclare_kdb_lock_as_raw.patch
+rt patches.rt/kdb-i386-compile-bugfixes
+rt patches.rt/kdb-memmap-cmd-fixups
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Remember to have fun...
---------------------------------------------------------------------
To unsubscribe, e-mail: opensuse-commit+unsubscribe@opensuse.org
For additional commands, e-mail: opensuse-commit+help@opensuse.org