commit libhugetlbfs for openSUSE:Factory
Hello community,
here is the log from the commit of package libhugetlbfs for openSUSE:Factory
checked in at Thu Apr 21 13:39:11 CEST 2011.
--------
--- libhugetlbfs/libhugetlbfs.changes 2010-10-06 18:55:20.000000000 +0200
+++ /mounts/work_src_done/STABLE/libhugetlbfs/libhugetlbfs.changes 2010-12-17 12:47:56.000000000 +0100
@@ -1,0 +2,7 @@
+Fri Dec 17 11:45:36 UTC 2010 - trenn@novell.com
+
+- Update to version 2.11
+ Bugfixes and new features are listed in the NEWS file in
+ /usr/share/doc/packages/libhugetlbfs/NEWS
+
+-------------------------------------------------------------------
calling whatdependson for head-i586
Old:
----
libhugetlbfs-2.9.tar.bz2
New:
----
libhugetlbfs-2.11.tar.bz2
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ libhugetlbfs.spec ++++++
--- /var/tmp/diff_new_pack.HryJZ6/_old 2011-04-21 13:38:59.000000000 +0200
+++ /var/tmp/diff_new_pack.HryJZ6/_new 2011-04-21 13:38:59.000000000 +0200
@@ -1,7 +1,7 @@
#
-# spec file for package libhugetlbfs (Version 2.9)
+# spec file for package libhugetlbfs
#
-# Copyright (c) 2010 SUSE LINUX Products GmbH, Nuernberg, Germany.
+# Copyright (c) 2011 SUSE LINUX Products GmbH, Nuernberg, Germany.
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -28,7 +28,7 @@
Obsoletes: libhugetlbfs-64bit
%endif
#
-Version: 2.9
+Version: 2.11
Release: 1
Url: http://libhugetlbfs.sourceforge.net/
BuildRoot: %{_tmppath}/%{name}-%{version}-build
++++++ libhugetlbfs-2.9.tar.bz2 -> libhugetlbfs-2.11.tar.bz2 ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/Makefile new/libhugetlbfs-2.11/Makefile
--- old/libhugetlbfs-2.9/Makefile 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/Makefile 2010-12-16 18:38:22.000000000 +0100
@@ -1,5 +1,5 @@
-PREFIX = /usr/local
-EXEDIR = /bin
+PREFIX ?= /usr/local
+EXEDIR ?= /bin
LIBOBJS = hugeutils.o version.o init.o morecore.o debug.o alloc.o shm.o kernel-features.o
LIBPUOBJS = init_privutils.o debug.o hugeutils.o kernel-features.o
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/NEWS new/libhugetlbfs-2.11/NEWS
--- old/libhugetlbfs-2.9/NEWS 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/NEWS 2010-12-16 18:38:22.000000000 +0100
@@ -1,3 +1,27 @@
+libhugetlbfs 2.11 "Ghost Pepper"
+======================================================================
+New Features
+* cpupcstat reports time servicing tlb misses when requested
+* When supported by the kernel and glibc, MAP_HUGETLB is used
+ for the heap and to back memory returned by get_huge_pages.
+ These features can now be used without mounting hugetlbfs
+
+Bug Fixes
+* tlbmiss_cost.sh suppresses oprofile errors
+* numerous fixes to setup_helper.py
+* Corrected usage of hugetlbfs_test_feature return value
+* find_mounts now correctly ignores non-hugetlbfs mount points
+* When prefaulting pages for get_huge_pages readv was using the fd
+ for the mapping, this caused the prefault to fail on older libc.
+ Now /dev/zero is used for all prefaulting
+
+libhugetlbfs 2.10 "Another Hottie"
+======================================================================
+Bug Fixes
+* hugeadm now handles pool size deltas properly
+* Makefile uses ?= to assign PREFIX and EXEDIR to allow for easier build
+ modification
+
libhugetlbfs 2.9 "Something Spicy"
======================================================================
New Features
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/TLBC/OpCollect.pm new/libhugetlbfs-2.11/TLBC/OpCollect.pm
--- old/libhugetlbfs-2.9/TLBC/OpCollect.pm 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/TLBC/OpCollect.pm 2010-12-16 18:38:22.000000000 +0100
@@ -55,7 +55,7 @@
my $self = shift;
my $vmlinux = shift;
my $refEvents = shift;
- my $cmd = "$Bin/oprofile_start.sh --vmlinux=$vmlinux ";
+ my $cmd = "$Bin/oprofile_start.sh --sample-cycle-factor 6 --sample-event-factor 2 --vmlinux=$vmlinux ";
foreach my $event (@{$refEvents}) {
$cmd .= " --event=$event";
$self->_get_event($event);
@@ -154,7 +154,10 @@
$col = $self->_get_column($event);
foreach $line (@results) {
- if ($line =~ /$binName/) {
+ if ($line !~ /^\s+[0-9]/) {
+ next;
+ }
+ if ($binName eq "/" || $line =~ /$binName/) {
chomp($line);
$line =~ s/^\s+//;
$line =~ s/\s+$//;
@@ -169,7 +172,7 @@
sub read_eventcount()
{
system("opcontrol --dump > /dev/null 2>&1");
- $report = `opreport`;
+ $report = `opreport -x 2> /dev/null`;
}
sub shutdown()
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/TLBC/PerfCollect.pm new/libhugetlbfs-2.11/TLBC/PerfCollect.pm
--- old/libhugetlbfs-2.9/TLBC/PerfCollect.pm 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/TLBC/PerfCollect.pm 2010-12-16 18:38:22.000000000 +0100
@@ -111,7 +111,7 @@
}
foreach $line (@lines) {
- if ($line =~ /$binName/) {
+ if ($binName eq "/" || $line =~ /$binName/) {
chomp($line);
$line =~ s/^\s+//;
$line =~ s/\s+$//;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/alloc.c new/libhugetlbfs-2.11/alloc.c
--- old/libhugetlbfs-2.9/alloc.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/alloc.c 2010-12-16 18:38:22.000000000 +0100
@@ -77,27 +77,42 @@
void *get_huge_pages(size_t len, ghp_t flags)
{
void *buf;
- int buf_fd;
- int saved_error;
+ int buf_fd = -1;
int mmap_reserve = __hugetlb_opts.no_reserve ? MAP_NORESERVE : 0;
+ int mmap_hugetlb = 0;
+ int ret;
/* Catch an altogether-too easy typo */
if (flags & GHR_MASK)
ERROR("Improper use of GHR_* in get_huge_pages()\n");
- /* Create a file descriptor for the new region */
- buf_fd = hugetlbfs_unlinked_fd();
- if (buf_fd < 0) {
- WARNING("Couldn't open hugetlbfs file for %zd-sized buffer\n",
- len);
- return NULL;
+#ifdef MAP_HUGETLB
+ mmap_hugetlb = MAP_HUGETLB;
+#endif
+
+ if (__hugetlb_opts.map_hugetlb &&
+ gethugepagesize() == kernel_default_hugepage_size()) {
+ /* Because we can use MAP_HUGETLB, we simply mmap the region */
+ buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|MAP_ANONYMOUS|mmap_hugetlb|mmap_reserve,
+ 0, 0);
+ } else {
+ /* Create a file descriptor for the new region */
+ buf_fd = hugetlbfs_unlinked_fd();
+ if (buf_fd < 0) {
+ WARNING("Couldn't open hugetlbfs file for %zd-sized buffer\n",
+ len);
+ return NULL;
+ }
+
+ /* Map the requested region */
+ buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
+ MAP_PRIVATE|mmap_reserve, buf_fd, 0);
}
- /* Map the requested region */
- buf = mmap(NULL, len, PROT_READ|PROT_WRITE,
- MAP_PRIVATE|mmap_reserve, buf_fd, 0);
if (buf == MAP_FAILED) {
- close(buf_fd);
+ if (buf_fd >= 0)
+ close(buf_fd);
WARNING("get_huge_pages: New region mapping failed (flags: 0x%lX): %s\n",
flags, strerror(errno));
@@ -105,18 +120,19 @@
}
/* Fault the region to ensure accesses succeed */
- if (hugetlbfs_prefault(buf_fd, buf, len) != 0) {
- saved_error = errno;
+ ret = hugetlbfs_prefault(buf, len);
+ if (ret != 0) {
munmap(buf, len);
- close(buf_fd);
+ if (buf_fd >= 0)
+ close(buf_fd);
WARNING("get_huge_pages: Prefaulting failed (flags: 0x%lX): %s\n",
- flags, strerror(saved_error));
+ flags, strerror(ret));
return NULL;
}
/* Close the file so we do not have to track the descriptor */
- if (close(buf_fd) != 0) {
+ if (buf_fd >= 0 && close(buf_fd) != 0) {
WARNING("Failed to close new buffer fd: %s\n", strerror(errno));
munmap(buf, len);
return NULL;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/contrib/tlbmiss_cost.sh new/libhugetlbfs-2.11/contrib/tlbmiss_cost.sh
--- old/libhugetlbfs-2.9/contrib/tlbmiss_cost.sh 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/contrib/tlbmiss_cost.sh 2010-12-16 18:38:22.000000000 +0100
@@ -624,7 +624,7 @@
LAST_LATENCY_CYCLES=$(($WALK/$DTLB))
}
-ARGS=`getopt -o c:s:vqh --long calibrator:,stream:,vmlinux:,verbose,quiet,fetch-calibrator,fetch-stream,help -n 'tlbmiss_cost.sh' -- "$@"`
+ARGS=`getopt -o c:s:fvqh --long calibrator:,stream:,vmlinux:,verbose,quiet,fetch-calibrator,fetch-stream,ignore-cache,help -n 'tlbmiss_cost.sh' -- "$@"`
eval set -- "$ARGS"
@@ -635,6 +635,7 @@
--vmlinux) VMLINUX="--vmlinux $2" ; shift 2 ;;
-v|--verbose) VERBOSE=$(($VERBOSE+1)); shift;;
-q|--quiet) VERBOSE=$(($VERBOSE-1)); shift;;
+ -f|--ignore-cache) IGNORE_CACHE=yes; shift;;
--fetch-calibrator) calibrator_fetch; shift;;
--fetch-stream) stream_fetch; shift;;
-h|--help) usage; shift;;
@@ -644,8 +645,35 @@
esac
done
+HOSTNAME=`hostname 2> /dev/null`
ARCH=`uname -m | sed -e s/i.86/i386/`
+if [ "$IGNORE_CACHE" != "yes" ]; then
+ print_trace Searching for a cached value for TLB miss
+
+ # Look for a cached entry for the TLB miss value
+ if [ -e /etc/tlbmisscost.conf ]; then
+ print_trace Checking /etc/tlbmisscost.conf
+ grep TLB_MISS_COST /etc/tlbmisscost.conf
+ if [ $? -eq 0 ]; then
+ exit 0
+ fi
+ fi
+
+ # Look for a cached entry in home
+ if [ -e $HOME/.tlbmisscostrc ]; then
+ print_trace Checking $HOME/.tlbmisscostrc
+ HOSTNAME=`hostname 2> /dev/null`
+ if [ "$HOSTNAME" != "" -a "$HOSTNAME" != "localhost" ]; then
+ grep $HOSTNAME:TLB_MISS_COST $HOME/.tlbmisscostrc | sed -e "s/^$HOSTNAME://"
+ if [ $? -eq 0 ]; then
+ exit 0
+ fi
+ fi
+ fi
+ print_trace Cached value unavailable
+fi
+
if [[ "$ARCH" == "ppc64" || "$ARCH" == "ppc" ]]; then
oprofile_calc
else
@@ -653,4 +681,13 @@
fi
echo TLB_MISS_COST=$LAST_LATENCY_CYCLES
+
+# Save for future reference
+echo TLB_MISS_COST=$LAST_LATENCY_CYCLES 2> /dev/null > /etc/tlbmisscost.conf
+if [ "$HOSTNAME" != "" -a "$HOSTNAME" != "localhost" ]; then
+ grep -v $HOSTNAME:TLB_MISS_COST $HOME/.tlbmisscostrc > $HOME/.tlbmisscostrc.$$ 2> /dev/null
+ echo $HOSTNAME:TLB_MISS_COST=$LAST_LATENCY_CYCLES >> $HOME/.tlbmisscostrc.$$
+ mv $HOME/.tlbmisscostrc.$$ $HOME/.tlbmisscostrc
+fi
+
exit 0
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/cpupcstat new/libhugetlbfs-2.11/cpupcstat
--- old/libhugetlbfs-2.9/cpupcstat 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/cpupcstat 2010-12-16 18:38:22.000000000 +0100
@@ -18,6 +18,7 @@
my $target;
my $real_target;
my $target_pid;
+my $target_global;
my $misses;
my $instructions = 0;
my $cycles = 0;
@@ -37,7 +38,17 @@
my $miss_scale = 0;
my $ins_scale = 0;
my $cyc_scale = 0;
-my $tlbcost_cache = "$Bin/tlbmisscost";
+
+sub calc_tlbmiss_cost()
+{
+ my $cost_script = `which tlbmiss_cost.sh`;
+ if ($cost_script eq "") {
+ $cost_script = "$Bin/contrib/tlbmiss_cost.sh";
+ }
+ my $data = `$cost_script --vmlinux $vmlinux`;
+ ($data,$cost_in_cycles) = split(/\=/, $data);
+ chomp($cost_in_cycles);
+}
sub start_target()
{
@@ -105,6 +116,9 @@
$binName = $results[0];
}
$pid = start_target();
+ } elsif (defined $target_global) {
+ $binName='/';
+ $pid = $$;
}
$binName = `basename $binName`;
@@ -117,9 +131,12 @@
if ($instruct_ratio) {
printf("%24s\n", "Instructions/TLB Miss\n");
$ins_scale = $collector->samples("instructions");
- } elsif ($cycle_ratio || $service) {
+ } elsif ($cycle_ratio) {
printf("%24s\n", "Cycles/TLB Miss\n");
$cyc_scale = $collector->samples("timer");
+ } elsif ($service) {
+ printf("%24s\n", "TLB Miss %age Time\n");
+ $cyc_scale = $collector->samples("timer");
} else {
print("\n");
}
@@ -159,6 +176,17 @@
($cyc_new * $cyc_scale) / ($new * $miss_scale));
}
$cyc_prev = $ret;
+ } elsif ($service) {
+
+ $ret = $collector->get_current_eventcount($binName,
+ "timer");
+ $cyc_new = $ret - $cyc_prev;
+ my $miss_cycles = $new * $cost_in_cycles * $miss_scale;
+ my $total_cycles = $cyc_new * $cyc_scale;
+
+ printf "%24.4f%%\n", $miss_cycles * 100/$total_cycles;
+
+ $cyc_prev = $ret;
} else {
print("\n");
}
@@ -206,12 +234,15 @@
--vmlinux /path/to/vmlinux Sets the vmlinux file to use
--delay N Waits N seconds before rereading the
miss rate
+ --target-global Watch the miss rate of all processes
--target-pid P Watch the miss rate of P instead of a target
--real-target T Watch T instead of target in case target is
a launcher script
--time-limit L Sets a time limit for watching the target
--kernel Output DTLB miss data for the kernel as well
as the specified target
+ --time-servicing Print the percentage of time servicing TLB
+ misses
--misses-per-instruction Prints the ratio of TLB misses per
instruction retired
--misses-per-cycle Prints the ratio of TLB misses per CPU cycle
@@ -236,6 +267,7 @@
GetOptions ('v|vmlinux=s' => \$vmlinux,
'h|help' => \&print_usage,
'd|delay=i' => \$wait_time,
+ 'g|target-global' => \$target_global,
'p|target-pid=i' => \$target_pid,
'r|real-target=s' => \$real_target,
'l|time-limit=i' => \$time_limit,
@@ -248,7 +280,7 @@
's|persist' => \$persist,
'<>' => \&get_target);
-if (!$target && not defined $target_pid) {
+if (!$target && !$target_global && not defined $target_pid) {
print_usage();
}
@@ -261,6 +293,10 @@
chomp($target);
}
+if ($service) {
+ calc_tlbmiss_cost();
+}
+
$misses = 0;
$kern_misses = 0;
run_profile();
@@ -283,28 +319,8 @@
}
if ($service && $cycles > 0) {
- if ( !$config && -e $tlbcost_cache ) {
- $config = $tlbcost_cache;
- }
- if ($config) {
- open(DAT, $config) || die "Failed to open $config\n";
- my $data = <DAT>;
- close(DAT);
- ($data,$cost_in_cycles) = split(/\=/, $data);
- chomp($cost_in_cycles);
- }
-
if ($cost_in_cycles <= 0) {
- my $cost_script = `which tlbmiss_cost.sh`;
- if ($cost_script eq "") {
- $cost_script = "$Bin/contrib/tlbmiss_cost.sh";
- }
- my $data = `$cost_script --vmlinux $vmlinux`;
- ($data,$cost_in_cycles) = split(/\=/, $data);
- chomp($cost_in_cycles);
- open (DAT, ">$tlbcost_cache");
- print DAT "TLB_MISS_COST=$cost_in_cycles";
- close (DAT);
+ calc_tlbmiss_cost();
}
my $total_cost = $cost_in_cycles * $misses;
print("$target spent ",
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/debug.c new/libhugetlbfs-2.11/debug.c
--- old/libhugetlbfs-2.9/debug.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/debug.c 2010-12-16 18:38:22.000000000 +0100
@@ -28,8 +28,8 @@
#include "libhugetlbfs_internal.h"
int __hugetlbfs_verbose = VERBOSITY_DEFAULT;
-int __hugetlbfs_debug = 0;
-int __hugetlbfs_prefault = 1;
+bool __hugetlbfs_debug = false;
+bool __hugetlbfs_prefault = true;
char __hugetlbfs_hostname[64];
static int initialized;
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/elflink.c new/libhugetlbfs-2.11/elflink.c
--- old/libhugetlbfs-2.9/elflink.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/elflink.c 2010-12-16 18:38:22.000000000 +0100
@@ -59,6 +59,14 @@
#define ELF_ST_TYPE(x) ELF64_ST_TYPE(x)
#endif
+/*
+ * SHARED_TIMEOUT is used by find_or_prepare_shared_file for when it
+ * should timeout while waiting for other users to finish preparing
+ * the file it wants. The value is the number of tries before giving
+ * up with a 1 second wait between tries
+ */
+#define SHARED_TIMEOUT 10
+
/* This function prints an error message to stderr, then aborts. It
* is safe to call, even if the executable segments are presently
* unmapped.
@@ -971,9 +979,10 @@
*/
static int find_or_prepare_shared_file(struct seg_info *htlb_seg_info)
{
- int fdx, fds;
+ int fdx = -1, fds;
int errnox, errnos;
int ret;
+ int i;
char final_path[PATH_MAX+1];
char tmp_path[PATH_MAX+1];
@@ -982,7 +991,7 @@
return -1;
assemble_path(tmp_path, "%s.tmp", final_path);
- do {
+ for (i = 0; i < SHARED_TIMEOUT; i++) {
/* NB: mode is modified by umask */
fdx = open(tmp_path, O_CREAT | O_EXCL | O_RDWR, 0666);
errnox = errno;
@@ -1038,8 +1047,7 @@
/* Both opens failed, somebody else is still preparing */
/* Wait and try again */
sleep(1);
- /* FIXME: should have a timeout */
- } while (1);
+ }
fail:
if (fdx > 0) {
@@ -1223,7 +1231,7 @@
"remapping for non-relinked "
"binaries\n");
INFO("Disabling filesz copy optimization\n");
- __hugetlb_opts.min_copy = 0;
+ __hugetlb_opts.min_copy = false;
} else {
if (&__executable_start) {
WARNING("LD_PRELOAD is incompatible with "
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/huge_page_setup_helper.py new/libhugetlbfs-2.11/huge_page_setup_helper.py
--- old/libhugetlbfs-2.9/huge_page_setup_helper.py 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/huge_page_setup_helper.py 2010-12-16 18:38:22.000000000 +0100
@@ -10,7 +10,12 @@
#
import os
-debug = True
+debug = False
+
+# must be executed under the root to operate
+if os.geteuid() != 0:
+ print "You must be root to setup hugepages!"
+ os._exit(1)
# config files we need access to
sysctlConf = "/etc/sysctl.conf"
@@ -95,13 +100,13 @@
try:
userIn = raw_input("How much memory would you like to allocate for huge pages? "
"(input in MB, unless postfixed with GB): ")
- if userIn[-2:] == "GB":
+ if userIn[-2:] == "GB":
userHugePageReqMB = int(userIn[0:-2]) * 1024
- elif userIn[-1:] == "G":
+ elif userIn[-1:] == "G":
userHugePageReqMB = int(userIn[0:-1]) * 1024
- elif userIn[-2:] == "MB":
+ elif userIn[-2:] == "MB":
userHugePageReqMB = int(userIn[0:-2])
- elif userIn[-1:] == "M":
+ elif userIn[-1:] == "M":
userHugePageReqMB = int(userIn[0:-1])
else:
userHugePageReqMB = int(userIn)
@@ -109,7 +114,7 @@
if userHugePageReqMB > (memTotal - 128):
userIn = None
print "Refusing to allocate %d, you must leave at least 128MB for the system" % userHugePageReqMB
- elif userHugePageReqMB < (hugePageSize / 1024):
+ elif userHugePageReqMB < (hugePageSize / (1024 * 1024)):
userIn = None
print "Sorry, allocation must be at least a page's worth!"
else:
@@ -129,8 +134,10 @@
# ask for the name of the group allowed access to huge pages
while inputIsValid == False:
foundbad = False
- userGroupReq = raw_input("What group should have access to the huge pages? "
- "(The group will be created, if need be): ")
+ userGroupReq = raw_input("What group should have access to the huge pages?"
+ "(The group will be created, if need be) [hugepages]: ")
+ if userGroupReq is '':
+ userGroupReq = 'hugepages'
if userGroupReq[0].isdigit() or userGroupReq[0] == "-":
foundbad = True
print "Group names cannot start with a number or dash, please try again!"
@@ -159,7 +166,7 @@
print "Group %s (gid %d) already exists, we'll use it" % (userGroupReq, userGIDReq)
else:
if debug == False:
- os.popen("/usr/sbin/groupadd %s" % userGroupReq)
+ os.popen("/usr/sbin/groupadd %s" % userGroupReq)
else:
print "/usr/sbin/groupadd %s" % userGroupReq
groupNames = os.popen("/usr/bin/getent group %s" % userGroupReq).readlines()
@@ -215,7 +222,10 @@
if userExists == False:
print "Creating user %s with membership in huge page group" % hugeUser
if debug == False:
- os.popen("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
+ if hugeUser == userGroupReq:
+ os.popen("/usr/sbin/useradd %s -g %s" % (hugeUser, userGroupReq))
+ else:
+ os.popen("/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq))
else:
print "/usr/sbin/useradd %s -G %s" % (hugeUser, userGroupReq)
print
@@ -291,8 +301,13 @@
for line in limitsConfLines:
cfgExist = False
for hugeUser in hugePageUserList:
- if line.split()[0] == hugeUser:
- cfgExist = True
+ try:
+ if line.split()[0] == hugeUser:
+ cfgExist = True
+ except IndexError:
+ # hit either white or comment line, it is safe not to take
+ # any action and continue.
+ pass
if cfgExist == True:
continue
else:
@@ -326,4 +341,3 @@
print " * Huge Page User Group.....: %s (%d)" % (userGroupReq, userGIDReq)
print
-
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/hugeadm.c new/libhugetlbfs-2.11/hugeadm.c
--- old/libhugetlbfs-2.9/hugeadm.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/hugeadm.c 2010-12-16 18:38:22.000000000 +0100
@@ -1047,7 +1047,7 @@
static long value_adjust(char *adjust_str, long base, long page_size)
{
- unsigned long long adjust;
+ long long adjust;
char *iter;
/* Convert and validate the adjust. */
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/hugeutils.c new/libhugetlbfs-2.11/hugeutils.c
--- old/libhugetlbfs-2.9/hugeutils.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/hugeutils.c 2010-12-16 18:38:22.000000000 +0100
@@ -241,7 +241,7 @@
{
char *env;
- __hugetlb_opts.min_copy = 1;
+ __hugetlb_opts.min_copy = true;
env = getenv("HUGETLB_VERBOSE");
if (env)
@@ -249,13 +249,13 @@
env = getenv("HUGETLB_DEBUG");
if (env) {
- __hugetlbfs_debug = 1;
+ __hugetlbfs_debug = true;
__hugetlbfs_verbose = VERBOSE_DEBUG;
}
env = getenv("HUGETLB_NO_PREFAULT");
if (env)
- __hugetlbfs_prefault = 0;
+ __hugetlbfs_prefault = false;
__hugetlb_opts.share_path = getenv("HUGETLB_SHARE_PATH");
__hugetlb_opts.elfmap = getenv("HUGETLB_ELFMAP");
@@ -274,7 +274,7 @@
if (__hugetlb_opts.min_copy && env && (strcasecmp(env, "no") == 0)) {
INFO("HUGETLB_MINIMAL_COPY=%s, disabling filesz copy "
"optimization\n", env);
- __hugetlb_opts.min_copy = 0;
+ __hugetlb_opts.min_copy = false;
}
env = getenv("HUGETLB_SHARE");
@@ -298,17 +298,32 @@
*/
env = getenv("HUGETLB_MORECORE_SHRINK");
if (env && strcasecmp(env, "yes") == 0)
- __hugetlb_opts.shrink_ok = 1;
+ __hugetlb_opts.shrink_ok = true;
/* Determine if shmget() calls should be overridden */
env = getenv("HUGETLB_SHM");
if (env && !strcmp(env, "yes"))
- __hugetlb_opts.shm_enabled = 1;
+ __hugetlb_opts.shm_enabled = true;
/* Determine if all reservations should be avoided */
env = getenv("HUGETLB_NO_RESERVE");
if (env && !strcmp(env, "yes"))
- __hugetlb_opts.no_reserve = 1;
+ __hugetlb_opts.no_reserve = true;
+}
+
+void hugetlbfs_setup_kernel_page_size()
+{
+ long page_size = kernel_default_hugepage_size();
+
+ if (page_size <= 0) {
+ WARNING("Unable to find default kernel huge page size\n");
+ return;
+ }
+
+ INFO("Found pagesize %ld kB\n", page_size / 1024);
+ hpage_sizes[0].pagesize = page_size;
+
+ nr_hpage_sizes = 1;
}
void hugetlbfs_check_priv_resv()
@@ -318,10 +333,10 @@
* prefaulting the huge pages we allocate since the kernel
* guarantees them. This can help NUMA performance quite a bit.
*/
- if (hugetlbfs_test_feature(HUGETLB_FEATURE_PRIVATE_RESV)) {
+ if (hugetlbfs_test_feature(HUGETLB_FEATURE_PRIVATE_RESV) > 0) {
INFO("Kernel has MAP_PRIVATE reservations. Disabling "
"heap prefaulting.\n");
- __hugetlbfs_prefault = 0;
+ __hugetlbfs_prefault = false;
}
}
@@ -333,11 +348,32 @@
* the user of NORESERVE where necessary
*/
if (__hugetlb_opts.no_reserve &&
- !hugetlbfs_test_feature(HUGETLB_FEATURE_SAFE_NORESERVE)) {
+ hugetlbfs_test_feature(HUGETLB_FEATURE_SAFE_NORESERVE) <= 0) {
INFO("Kernel is not safe for MAP_NORESERVE. Forcing "
"use of reservations.\n");
- __hugetlb_opts.no_reserve = 0;
+ __hugetlb_opts.no_reserve = false;
+ }
+}
+
+void hugetlbfs_check_map_hugetlb()
+{
+/*
+ * FIXME: MAP_HUGETLB has not been picked up by glibc so even though the
+ * kernel may support it, without the userspace mmap flag it cannot be
+ * used. This ifdef should be removed when the MAP_HUGETLB flag makes it
+ * into glibc.
+ */
+#ifdef MAP_HUGETLB
+ /*
+ * Kernels after 2.6.32 support mmaping pseudo-anonymous regions
+ * backed by huge pages, use this feature for huge pages we
+ * don't intend to share.
+ */
+ if (hugetlbfs_test_feature(HUGETLB_FEATURE_MAP_HUGETLB) > 0) {
+ INFO("Kernel supports MAP_HUGETLB\n");
+ __hugetlb_opts.map_hugetlb = true;
}
+#endif
}
/*
@@ -399,7 +435,7 @@
return -1;
}
-static void probe_default_hpage_size(void)
+void probe_default_hpage_size(void)
{
long size;
int index;
@@ -489,7 +525,7 @@
strcpy(hpage_sizes[idx].mount, path);
}
-static void debug_show_page_sizes(void)
+void debug_show_page_sizes(void)
{
int i;
@@ -508,7 +544,7 @@
char path[PATH_MAX+1];
char line[LINE_MAXLEN + 1];
char *eol;
- int bytes, err;
+ int bytes, err, dummy;
off_t offset;
fd = open("/proc/mounts", O_RDONLY);
@@ -537,9 +573,16 @@
offset = bytes - (eol + 1 - line);
lseek(fd, -offset, SEEK_CUR);
- err = sscanf(line, "%*s %" stringify(PATH_MAX) "s hugetlbfs ",
- path);
- if ((err == 1) && (hugetlbfs_test_path(path) == 1))
+ /*
+ * Match only hugetlbfs filesystems.
+ * Subtle: sscanf returns the number of input items matched
+ * and assigned. To force sscanf to match the literal
+ * "hugetlbfs" string we include a 'dummy' input item
+ * following that string.
+ */
+ err = sscanf(line, "%*s %" stringify(PATH_MAX) "s hugetlbfs "
+ "%*s %d", path, &dummy);
+ if ((err == 2) && (hugetlbfs_test_path(path) == 1))
add_hugetlbfs_mount(path, 0);
}
close(fd);
@@ -572,10 +615,6 @@
/* Then probe all mounted filesystems */
if (do_scan)
find_mounts();
-
- probe_default_hpage_size();
- if (__hugetlbfs_debug)
- debug_show_page_sizes();
}
int get_pool_size(long size, struct hpage_pool *pool)
@@ -915,8 +954,17 @@
}
#define IOV_LEN 64
-int hugetlbfs_prefault(int fd, void *addr, size_t length)
+int hugetlbfs_prefault(void *addr, size_t length)
{
+ size_t offset;
+ struct iovec iov[IOV_LEN];
+ int ret;
+ int i;
+ int fd;
+
+ if (!__hugetlbfs_prefault)
+ return 0;
+
/*
* The NUMA users of libhugetlbfs' malloc feature are
* expected to use the numactl program to specify an
@@ -933,30 +981,32 @@
* -ENOMEM is returned. The caller is expected to release the entire
* mapping and optionally it may recover by mapping base pages instead.
*/
- if (__hugetlbfs_prefault) {
- int i;
- size_t offset;
- struct iovec iov[IOV_LEN];
- int ret;
-
- for (offset = 0; offset < length; ) {
- for (i = 0; i < IOV_LEN && offset < length; i++) {
- iov[i].iov_base = addr + offset;
- iov[i].iov_len = 1;
- offset += gethugepagesize();
- }
- ret = readv(fd, iov, i);
- if (ret != i) {
- DEBUG("Got %d of %d requested; err=%d\n", ret,
- i, ret < 0 ? errno : 0);
- WARNING("Failed to reserve %ld huge pages "
- "for new region\n",
- length / gethugepagesize());
- return -ENOMEM;
- }
+
+ fd = open("/dev/zero", O_RDONLY);
+ if (fd < 0) {
+ ERROR("Failed to open /dev/zero for reading\n");
+ return -ENOMEM;
+ }
+
+ for (offset = 0; offset < length; ) {
+ for (i = 0; i < IOV_LEN && offset < length; i++) {
+ iov[i].iov_base = addr + offset;
+ iov[i].iov_len = 1;
+ offset += gethugepagesize();
+ }
+ ret = readv(fd, iov, i);
+ if (ret != i) {
+ DEBUG("Got %d of %d requested; err=%d\n", ret,
+ i, ret < 0 ? errno : 0);
+ WARNING("Failed to reserve %ld huge pages "
+ "for new region\n",
+ length / gethugepagesize());
+ close(fd);
+ return -ENOMEM;
}
}
+ close(fd);
return 0;
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/init.c new/libhugetlbfs-2.11/init.c
--- old/libhugetlbfs-2.9/init.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/init.c 2010-12-16 18:38:22.000000000 +0100
@@ -23,10 +23,15 @@
{
hugetlbfs_setup_env();
hugetlbfs_setup_debug();
+ hugetlbfs_setup_kernel_page_size();
setup_mounts();
+ probe_default_hpage_size();
+ if (__hugetlbfs_debug)
+ debug_show_page_sizes();
setup_features();
hugetlbfs_check_priv_resv();
hugetlbfs_check_safe_noreserve();
+ hugetlbfs_check_map_hugetlb();
#ifndef NO_ELFLINK
hugetlbfs_setup_elflink();
#endif
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/kernel-features.c new/libhugetlbfs-2.11/kernel-features.c
--- old/libhugetlbfs-2.9/kernel-features.c 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/kernel-features.c 2010-12-16 18:38:22.000000000 +0100
@@ -44,6 +44,10 @@
[HUGETLB_FEATURE_SAFE_NORESERVE] = {
.name = "noreserve_safe",
.required_version = "2.6.34",
+ },
+ [HUGETLB_FEATURE_MAP_HUGETLB] = {
+ .name = "map_hugetlb",
+ .required_version = "2.6.32",
}
};
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/libhugetlbfs-2.9/libhugetlbfs_internal.h new/libhugetlbfs-2.11/libhugetlbfs_internal.h
--- old/libhugetlbfs-2.9/libhugetlbfs_internal.h 2010-06-28 14:05:50.000000000 +0200
+++ new/libhugetlbfs-2.11/libhugetlbfs_internal.h 2010-12-16 18:38:22.000000000 +0100
@@ -33,6 +33,7 @@
#include
participants (1)
-
root@hilbert.suse.de