Hello community,
here is the log from the commit of package xen for openSUSE:Factory checked in at 2018-09-18 11:39:08
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/xen (Old)
and /work/SRC/openSUSE:Factory/.xen.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "xen"
Tue Sep 18 11:39:08 2018 rev:254 rq:635194 version:4.11.0_08
Changes:
--------
--- /work/SRC/openSUSE:Factory/xen/xen.changes 2018-09-05 13:43:49.845806306 +0200
+++ /work/SRC/openSUSE:Factory/.xen.new/xen.changes 2018-09-18 11:39:18.528170496 +0200
@@ -1,0 +2,38 @@
+Tue Sep 11 13:29:58 MDT 2018 - carnold@suse.com
+
+- bsc#1106263 - L3: The affinity reporting via 'xl vcpu-list' is
+ apparently broken
+ 5b8fae26-tools-libxl-correct-vcpu-affinity-output-with-sparse-physical-cpu-map.patch
+ 5b8fae26-xen-fill-topology-info-for-all-present-cpus.patch
+ 5b8fb5af-tools-xl-refuse-to-set-number-of-vcpus-to-0-via-xl-vcpu-set.patch
+
+-------------------------------------------------------------------
+Tue Sep 11 07:47:57 MDT 2018 - carnold@suse.com
+
+- bsc#1094508 - L3: Kernel oops in fs/dcache.c called by
+ d_materialise_unique()
+ 5b9784ad-x86-HVM-drop-hvm_fetch_from_guest_linear.patch
+ 5b9784d2-x86-HVM-add-known_gla-helper.patch
+ 5b9784f2-x86-HVM-split-page-straddling-accesses.patch
+- bsc#1103279 - (CVE-2018-15470) VUL-0: CVE-2018-15470: xen:
+ oxenstored does not apply quota-maxentity (XSA-272)
+ 5b72fbbe-oxenstored-eval-order.patch
+- bsc#1103275 - (CVE-2018-15469) VUL-0: CVE-2018-15469: xen: Use of
+ v2 grant tables may cause crash on ARM (XSA-268)
+ 5b72fbbe-ARM-disable-grant-table-v2.patch
+- Upstream patches from Jan (bsc#1027519)
+ 5b6d84ac-x86-fix-improve-vlapic-read-write.patch
+ 5b74190e-x86-hvm-ioreq-MMIO-range-check-honor-DF.patch
+ 5b75afef-x86-setup-avoid-OoB-E820-lookup.patch
+ 5b76b780-rangeset-inquiry-functions-tolerate-NULL.patch
+ 5b83c654-VT-d-dmar-iommu-mem-leak-fix.patch
+ 5b8d5832-x86-assorted-array_index_nospec-insertions.patch
+- Drop 5b741962-x86-write-to-correct-variable-in-parse_pv_l1tf.patch
+
+-------------------------------------------------------------------
+Tue Aug 28 16:07:52 MDT 2018 - carnold@suse.com
+
+- bsc#1078292 - rpmbuild -ba SPECS/xen.spec with xen-4.9.1 failed
+ xen.spec
+
+-------------------------------------------------------------------
@@ -14,2 +52,2 @@
-- bsc#1103276 - VUL-0: xen: x86: Incorrect MSR_DEBUGCTL handling
- lets guests enable BTS (XSA-269)
+- bsc#1103276 - VUL-0: CVE-2018-15468: xen: x86: Incorrect
+ MSR_DEBUGCTL handling lets guests enable BTS (XSA-269)
Old:
----
5b741962-x86-write-to-correct-variable-in-parse_pv_l1tf.patch
New:
----
5b6d84ac-x86-fix-improve-vlapic-read-write.patch
5b72fbbe-ARM-disable-grant-table-v2.patch
5b72fbbe-oxenstored-eval-order.patch
5b74190e-x86-hvm-ioreq-MMIO-range-check-honor-DF.patch
5b75afef-x86-setup-avoid-OoB-E820-lookup.patch
5b76b780-rangeset-inquiry-functions-tolerate-NULL.patch
5b83c654-VT-d-dmar-iommu-mem-leak-fix.patch
5b8d5832-x86-assorted-array_index_nospec-insertions.patch
5b8fae26-tools-libxl-correct-vcpu-affinity-output-with-sparse-physical-cpu-map.patch
5b8fae26-xen-fill-topology-info-for-all-present-cpus.patch
5b8fb5af-tools-xl-refuse-to-set-number-of-vcpus-to-0-via-xl-vcpu-set.patch
5b9784ad-x86-HVM-drop-hvm_fetch_from_guest_linear.patch
5b9784d2-x86-HVM-add-known_gla-helper.patch
5b9784f2-x86-HVM-split-page-straddling-accesses.patch
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ xen.spec ++++++
--- /var/tmp/diff_new_pack.RbomEQ/_old 2018-09-18 11:39:20.212168719 +0200
+++ /var/tmp/diff_new_pack.RbomEQ/_new 2018-09-18 11:39:20.216168715 +0200
@@ -83,6 +83,7 @@
BuildRequires: glib2-devel
BuildRequires: libaio-devel
BuildRequires: libbz2-devel
+BuildRequires: libnl3-devel
BuildRequires: libpixman-1-0-devel
BuildRequires: libuuid-devel
BuildRequires: libxml2-devel
@@ -126,7 +127,7 @@
BuildRequires: pesign-obs-integration
%endif
-Version: 4.11.0_04
+Version: 4.11.0_08
Release: 0
Summary: Xen Virtualization: Hypervisor (aka VMM aka Microkernel)
License: GPL-2.0
@@ -188,19 +189,32 @@
Patch25: 5b56feb1-hvm-Disallow-unknown-MSR_EFER-bits.patch
Patch26: 5b56feb2-spec-ctrl-Fix-the-parsing-of-xpti--on-fixed-Intel-hardware.patch
Patch27: 5b62ca93-VMX-avoid-hitting-BUG_ON.patch
-Patch28: 5b6d8ce2-x86-XPTI-parsing.patch
-Patch29: 5b72fbbe-vtx-Fix-the-checking-for-unknown-invalid-MSR_DEBUGCTL-bits.patch
-Patch30: 5b72fbbf-1-spec-ctrl-Calculate-safe-PTE-addresses-for-L1TF-mitigations.patch
-Patch31: 5b72fbbf-2-spec-ctrl-Introduce-an-option-to-control-L1TF-mitigation-for-PV-guests.patch
-Patch32: 5b72fbbf-3-shadow-Infrastructure-to-force-a-PV-guest-into-shadow-mode.patch
-Patch33: 5b72fbbf-4-mm-Plumbing-to-allow-any-PTE-update-to-fail-with--ERESTART.patch
-Patch34: 5b72fbbf-5-pv-Force-a-guest-into-shadow-mode-when-it-writes-an-L1TF-vulnerable-PTE.patch
-Patch35: 5b72fbbf-6-spec-ctrl-CPUID-MSR-definitions-for-L1D_FLUSH.patch
-Patch36: 5b72fbbf-7-msr-Virtualise-MSR_FLUSH_CMD-for-guests.patch
-Patch37: 5b72fbbf-8-spec-ctrl-Introduce-an-option-to-control-L1D_FLUSH-for-HVM-HAP-guests.patch
-Patch38: 5b72fbbf-x86-Make-spec-ctrl-no-a-global-disable-of-all-mitigations.patch
-Patch39: 5b72fbbf-xl.conf-Add-global-affinity-masks.patch
-Patch40: 5b741962-x86-write-to-correct-variable-in-parse_pv_l1tf.patch
+Patch28: 5b6d84ac-x86-fix-improve-vlapic-read-write.patch
+Patch29: 5b6d8ce2-x86-XPTI-parsing.patch
+Patch30: 5b72fbbe-ARM-disable-grant-table-v2.patch
+Patch31: 5b72fbbe-oxenstored-eval-order.patch
+Patch32: 5b72fbbe-vtx-Fix-the-checking-for-unknown-invalid-MSR_DEBUGCTL-bits.patch
+Patch33: 5b72fbbf-1-spec-ctrl-Calculate-safe-PTE-addresses-for-L1TF-mitigations.patch
+Patch34: 5b72fbbf-2-spec-ctrl-Introduce-an-option-to-control-L1TF-mitigation-for-PV-guests.patch
+Patch35: 5b72fbbf-3-shadow-Infrastructure-to-force-a-PV-guest-into-shadow-mode.patch
+Patch36: 5b72fbbf-4-mm-Plumbing-to-allow-any-PTE-update-to-fail-with--ERESTART.patch
+Patch37: 5b72fbbf-5-pv-Force-a-guest-into-shadow-mode-when-it-writes-an-L1TF-vulnerable-PTE.patch
+Patch38: 5b72fbbf-6-spec-ctrl-CPUID-MSR-definitions-for-L1D_FLUSH.patch
+Patch39: 5b72fbbf-7-msr-Virtualise-MSR_FLUSH_CMD-for-guests.patch
+Patch40: 5b72fbbf-8-spec-ctrl-Introduce-an-option-to-control-L1D_FLUSH-for-HVM-HAP-guests.patch
+Patch41: 5b72fbbf-x86-Make-spec-ctrl-no-a-global-disable-of-all-mitigations.patch
+Patch42: 5b72fbbf-xl.conf-Add-global-affinity-masks.patch
+Patch43: 5b74190e-x86-hvm-ioreq-MMIO-range-check-honor-DF.patch
+Patch44: 5b75afef-x86-setup-avoid-OoB-E820-lookup.patch
+Patch45: 5b76b780-rangeset-inquiry-functions-tolerate-NULL.patch
+Patch46: 5b83c654-VT-d-dmar-iommu-mem-leak-fix.patch
+Patch47: 5b8d5832-x86-assorted-array_index_nospec-insertions.patch
+Patch48: 5b8fae26-tools-libxl-correct-vcpu-affinity-output-with-sparse-physical-cpu-map.patch
+Patch49: 5b8fae26-xen-fill-topology-info-for-all-present-cpus.patch
+Patch50: 5b8fb5af-tools-xl-refuse-to-set-number-of-vcpus-to-0-via-xl-vcpu-set.patch
+Patch51: 5b9784ad-x86-HVM-drop-hvm_fetch_from_guest_linear.patch
+Patch52: 5b9784d2-x86-HVM-add-known_gla-helper.patch
+Patch53: 5b9784f2-x86-HVM-split-page-straddling-accesses.patch
# Our platform specific patches
Patch400: xen-destdir.patch
Patch401: vif-bridge-no-iptables.patch
@@ -435,6 +449,19 @@
%patch38 -p1
%patch39 -p1
%patch40 -p1
+%patch41 -p1
+%patch42 -p1
+%patch43 -p1
+%patch44 -p1
+%patch45 -p1
+%patch46 -p1
+%patch47 -p1
+%patch48 -p1
+%patch49 -p1
+%patch50 -p1
+%patch51 -p1
+%patch52 -p1
+%patch53 -p1
# Our platform specific patches
%patch400 -p1
%patch401 -p1
@@ -1067,6 +1094,7 @@
/etc/xen/scripts/xen-network-common.sh
/etc/xen/scripts/xen-script-common.sh
/etc/xen/scripts/colo-proxy-setup
+/etc/xen/scripts/remus-netbuf-setup
%dir /usr/lib/supportconfig
%dir /usr/lib/supportconfig/plugins
/usr/lib/supportconfig/plugins/xen
++++++ 5b6d84ac-x86-fix-improve-vlapic-read-write.patch ++++++
# Commit b6f43c14cef3af8477a9eca4efab87dd150a2885
# Date 2018-08-10 13:27:24 +0100
# Author Andrew Cooper
# Committer Andrew Cooper
x86/vlapic: Bugfixes and improvements to vlapic_{read,write}()
Firstly, there is no 'offset' boundary check on the non-32-bit write path
before the call to vlapic_read_aligned(), which allows an attacker to read
beyond the end of vlapic->regs->data[], which is only 1024 bytes long.
However, as the backing memory is a domheap page, and misaligned accesses get
chunked down to single bytes across page boundaries, I can't spot any
XSA-worthy problems which occur from the overrun.
On real hardware, bad accesses don't instantly crash the machine. Their
behaviour is undefined, but the domain_crash() prohibits sensible testing.
Behave more like other x86 MMIO and terminate bad accesses with appropriate
defaults.
While making these changes, clean up and simplify the smaller-access
handling. In particular, avoid pointer-based mechanisms for 1/2-byte reads
so as to avoid forcing the value to be spilled to the stack.
add/remove: 0/0 grow/shrink: 0/2 up/down: 0/-175 (-175)
function old new delta
vlapic_read 211 142 -69
vlapic_write 304 198 -106
Finally, there are a plethora of read/write functions in the vlapic namespace,
so rename these to vlapic_mmio_{read,write}() to make their purpose more
clear.
Signed-off-by: Andrew Cooper
Reviewed-by: Jan Beulich
Reviewed-by: Roger Pau Monné
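As an aside for readers of the hunk below, the shift-based extraction it
introduces can be sketched standalone (illustrative names only, compiled
apart from Xen):

    #include <stdint.h>
    #include <stdio.h>

    /*
     * Sketch of the shift-based extraction the patch switches to: pull a
     * 1-, 2- or 4-byte value out of a 32-bit register image at byte
     * offset 'alignment', without going through byte pointers that force
     * the value onto the stack.
     */
    static uint32_t extract(uint32_t reg, unsigned int alignment,
                            unsigned int len)
    {
        if ( alignment + len > 4 )   /* access must fit in the register */
            return 0;                /* callers reject this case up front */

        switch ( len )
        {
        case 1: return (uint8_t) (reg >> (alignment * 8));
        case 2: return (uint16_t)(reg >> (alignment * 8));
        case 4: return reg;
        }
        return 0;
    }

    int main(void)
    {
        printf("%#x\n", extract(0xdeadbeef, 2, 2)); /* prints 0xdead */
        return 0;
    }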
--- a/xen/arch/x86/hvm/vlapic.c
+++ b/xen/arch/x86/hvm/vlapic.c
@@ -616,56 +616,37 @@ static uint32_t vlapic_read_aligned(cons
return 0;
}
-static int vlapic_read(
- struct vcpu *v, unsigned long address,
- unsigned int len, unsigned long *pval)
+static int vlapic_mmio_read(struct vcpu *v, unsigned long address,
+ unsigned int len, unsigned long *pval)
{
struct vlapic *vlapic = vcpu_vlapic(v);
unsigned int offset = address - vlapic_base_address(vlapic);
- unsigned int alignment = offset & 3, tmp, result = 0;
+ unsigned int alignment = offset & 0xf, result = 0;
- if ( offset > (APIC_TDCR + 0x3) )
- goto out;
-
- tmp = vlapic_read_aligned(vlapic, offset & ~3);
-
- switch ( len )
+ /*
+ * APIC registers are 32-bit values, aligned on 128-bit boundaries, and
+ * should be accessed with 32-bit wide loads.
+ *
+ * Some processors support smaller accesses, so we allow any access which
+ * fully fits within the 32-bit register.
+ */
+ if ( (alignment + len) <= 4 && offset <= (APIC_TDCR + 3) )
{
- case 1:
- result = *((unsigned char *)&tmp + alignment);
- break;
-
- case 2:
- if ( alignment == 3 )
- goto unaligned_exit_and_crash;
- result = *(unsigned short *)((unsigned char *)&tmp + alignment);
- break;
+ uint32_t reg = vlapic_read_aligned(vlapic, offset & ~0xf);
- case 4:
- if ( alignment != 0 )
- goto unaligned_exit_and_crash;
- result = *(unsigned int *)((unsigned char *)&tmp + alignment);
- break;
+ switch ( len )
+ {
+ case 1: result = (uint8_t) (reg >> (alignment * 8)); break;
+ case 2: result = (uint16_t)(reg >> (alignment * 8)); break;
+ case 4: result = reg; break;
+ }
- default:
- gdprintk(XENLOG_ERR, "Local APIC read with len=%#x, "
- "should be 4 instead.\n", len);
- goto exit_and_crash;
+ HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset %#x with length %#x, "
+ "and the result is %#x", offset, len, result);
}
- HVM_DBG_LOG(DBG_LEVEL_VLAPIC, "offset %#x with length %#x, "
- "and the result is %#x", offset, len, result);
-
- out:
*pval = result;
return X86EMUL_OKAY;
-
- unaligned_exit_and_crash:
- gdprintk(XENLOG_ERR, "Unaligned LAPIC read len=%#x at offset=%#x.\n",
- len, offset);
- exit_and_crash:
- domain_crash(v->domain);
- return X86EMUL_OKAY;
}
int hvm_x2apic_msr_read(struct vcpu *v, unsigned int msr, uint64_t *msr_content)
@@ -908,12 +889,14 @@ static void vlapic_reg_write(struct vcpu
}
}
-static int vlapic_write(struct vcpu *v, unsigned long address,
- unsigned int len, unsigned long val)
+static int vlapic_mmio_write(struct vcpu *v, unsigned long address,
+ unsigned int len, unsigned long val)
{
struct vlapic *vlapic = vcpu_vlapic(v);
unsigned int offset = address - vlapic_base_address(vlapic);
- int rc = X86EMUL_OKAY;
+ unsigned int alignment = offset & 0xf;
+
+ offset &= ~0xf;
if ( offset != APIC_EOI )
HVM_DBG_LOG(DBG_LEVEL_VLAPIC,
@@ -921,49 +904,38 @@ static int vlapic_write(struct vcpu *v,
offset, len, val);
/*
- * According to the IA32 Manual, all accesses should be 32 bits.
- * Some OSes do 8- or 16-byte accesses, however.
+ * APIC registers are 32-bit values, aligned on 128-bit boundaries, and
+ * should be accessed with 32-bit wide stores.
+ *
+ * Some processors support smaller accesses, so we allow any access which
+ * fully fits within the 32-bit register.
*/
- if ( unlikely(len != 4) )
+ if ( (alignment + len) <= 4 && offset <= APIC_TDCR )
{
- unsigned int tmp = vlapic_read_aligned(vlapic, offset & ~3);
- unsigned char alignment = (offset & 3) * 8;
-
- switch ( len )
+ if ( unlikely(len < 4) )
{
- case 1:
- val = ((tmp & ~(0xffU << alignment)) |
- ((val & 0xff) << alignment));
- break;
+ uint32_t reg = vlapic_read_aligned(vlapic, offset);
- case 2:
- if ( alignment & 1 )
- goto unaligned_exit_and_crash;
- val = ((tmp & ~(0xffffU << alignment)) |
- ((val & 0xffff) << alignment));
- break;
+ alignment *= 8;
- default:
- gprintk(XENLOG_ERR, "LAPIC write with len %u\n", len);
- goto exit_and_crash;
+ switch ( len )
+ {
+ case 1:
+ val = ((reg & ~(0xffU << alignment)) |
+ ((val & 0xff) << alignment));
+ break;
+
+ case 2:
+ val = ((reg & ~(0xffffU << alignment)) |
+ ((val & 0xffff) << alignment));
+ break;
+ }
}
- gdprintk(XENLOG_INFO, "Notice: LAPIC write with len %u\n", len);
- offset &= ~3;
+ vlapic_reg_write(v, offset, val);
}
- else if ( unlikely(offset & 3) )
- goto unaligned_exit_and_crash;
-
- vlapic_reg_write(v, offset, val);
return X86EMUL_OKAY;
-
- unaligned_exit_and_crash:
- gprintk(XENLOG_ERR, "Unaligned LAPIC write: len=%u offset=%#x.\n",
- len, offset);
- exit_and_crash:
- domain_crash(v->domain);
- return rc;
}
int vlapic_apicv_write(struct vcpu *v, unsigned int offset)
@@ -1077,8 +1049,8 @@ static int vlapic_range(struct vcpu *v,
static const struct hvm_mmio_ops vlapic_mmio_ops = {
.check = vlapic_range,
- .read = vlapic_read,
- .write = vlapic_write
+ .read = vlapic_mmio_read,
+ .write = vlapic_mmio_write,
};
static void set_x2apic_id(struct vlapic *vlapic)
++++++ 5b72fbbe-ARM-disable-grant-table-v2.patch ++++++
# Commit 9a5c16a3e75778c8a094ca87784d93b74676f46c
# Date 2018-08-14 16:56:46 +0100
# Author Stefano Stabellini
# Committer Andrew Cooper
ARM: disable grant table v2
It was never expected to work; the implementation is incomplete.
As a side effect, it also prevents guests from triggering a
"BUG_ON(page_get_owner(pg) != d)" in gnttab_unpopulate_status_frames().
This is XSA-268.
Signed-off-by: Stefano Stabellini
Acked-by: Jan Beulich
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -936,6 +936,8 @@ version are 1 and 2.
use of grant table v2 without transitive grants is an ABI breakage from the
guests point of view.
+The usage of gnttab v2 is not security supported on ARM platforms.
+
### gnttab\_max\_frames
`= <integer>`
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -97,7 +97,11 @@ static unsigned int __read_mostly max_ma
DEFAULT_MAX_MAPTRACK_FRAMES;
integer_runtime_param("gnttab_max_maptrack_frames", max_maptrack_frames);
-static unsigned int __read_mostly opt_gnttab_max_version = 2;
+#ifndef GNTTAB_MAX_VERSION
+#define GNTTAB_MAX_VERSION 2
+#endif
+
+static unsigned int __read_mostly opt_gnttab_max_version = GNTTAB_MAX_VERSION;
static bool __read_mostly opt_transitive_grants = true;
static int __init parse_gnttab(const char *s)
--- a/xen/include/asm-arm/grant_table.h
+++ b/xen/include/asm-arm/grant_table.h
@@ -7,6 +7,7 @@
#include
#define INITIAL_NR_GRANT_FRAMES 1U
+#define GNTTAB_MAX_VERSION 1
struct grant_table_arch {
gfn_t *shared_gfn;
++++++ 5b72fbbe-oxenstored-eval-order.patch ++++++
# Commit 73392c7fd14c59f8c96e0b2eeeb329e4ae9086b6
# Date 2018-08-14 16:56:46 +0100
# Author Christian Lindig
# Committer Andrew Cooper
tools/oxenstored: Make evaluation order explicit
In Store.path_write(), Path.apply_modify() updates the node_created
reference and both the value of apply_modify() and node_created are
returned by path_write().
At least with OCaml 4.06.1 this leads to the value of node_created being
returned *before* it is updated by apply_modify(). This in turn leads
to the quota for a domain not being updated in Store.write(). Hence, a
guest can create an unlimited number of entries in xenstore.
The fix is to make evaluation order explicit.
This is XSA-272.
Signed-off-by: Christian Lindig
Reviewed-by: Rob Hoes
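The pitfall generalises beyond OCaml; a minimal C sketch of the same bug
class (argument evaluation order is likewise unspecified in C; all names
here are invented for illustration):

    #include <stdio.h>

    static int node_created;

    /* Side effect: records whether a node was created. */
    static int apply_modify(int root)
    {
        node_created = 1;
        return root + 1;
    }

    static void consume(int root, int created)
    {
        printf("root=%d created=%d\n", root, created);
    }

    int main(void)
    {
        /*
         * Unsequenced: 'node_created' may be read before or after
         * apply_modify() runs, depending on argument evaluation order.
         */
        consume(apply_modify(0), node_created);

        /* The fix, as in the patch: force the order with a local binding. */
        node_created = 0;
        int root = apply_modify(0);
        consume(root, node_created);
        return 0;
    }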
--- a/tools/ocaml/xenstored/store.ml
+++ b/tools/ocaml/xenstored/store.ml
@@ -262,7 +262,8 @@ let path_write store perm path value =
Node.check_perm store.root perm Perms.WRITE;
Node.set_value store.root value, false
) else
- Path.apply_modify store.root path do_write, !node_created
+ let root = Path.apply_modify store.root path do_write in
+ root, !node_created
let path_rm store perm path =
let do_rm node name =
++++++ 5b72fbbf-2-spec-ctrl-Introduce-an-option-to-control-L1TF-mitigation-for-PV-guests.patch ++++++
--- /var/tmp/diff_new_pack.RbomEQ/_old 2018-09-18 11:39:20.368168554 +0200
+++ /var/tmp/diff_new_pack.RbomEQ/_new 2018-09-18 11:39:20.372168550 +0200
@@ -17,11 +17,9 @@
Reviewed-by: Jan Beulich
(cherry picked from commit 66a4e986819a86ba66ca2fe9d925e62a4fd30114)
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index e5e7fdc405..763cc1d878 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
-@@ -1544,6 +1544,30 @@ do; there may be other custom operating systems which do. If you're
+@@ -1546,6 +1546,30 @@ do; there may be other custom operating
certain you don't plan on having PV guests which use this feature,
turning it off can reduce the attack surface.
@@ -52,8 +50,6 @@
### pv-shim (x86)
`= <boolean>`
-diff --git a/xen/arch/x86/Kconfig b/xen/arch/x86/Kconfig
-index f64fc56739..cfba4a708c 100644
--- a/xen/arch/x86/Kconfig
+++ b/xen/arch/x86/Kconfig
@@ -72,6 +72,7 @@ config SHADOW_PAGING
@@ -64,8 +60,6 @@
Under a small number of specific workloads, shadow paging may be
deliberately used as a performance optimisation.
-diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
-index fe15a58de0..7995e27218 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -23,6 +23,7 @@
@@ -76,7 +70,7 @@
#include
#include
-@@ -203,6 +204,55 @@ static int __init parse_spec_ctrl(const char *s)
+@@ -203,6 +204,55 @@ static int __init parse_spec_ctrl(const
}
custom_param("spec-ctrl", parse_spec_ctrl);
@@ -93,7 +87,7 @@
+
+ /* Interpret 'pv-l1tf' alone in its positive boolean form. */
+ if ( *s == '\0' )
-+ opt_xpti = OPT_PV_L1TF_DOM0 | OPT_PV_L1TF_DOMU;
++ opt_pv_l1tf = OPT_PV_L1TF_DOM0 | OPT_PV_L1TF_DOMU;
+
+ do {
+ ss = strchr(s, ',');
@@ -132,7 +126,7 @@
static void __init print_details(enum ind_thunk thunk, uint64_t caps)
{
unsigned int _7d0 = 0, e8b = 0, tmp;
-@@ -226,9 +276,16 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+@@ -226,9 +276,16 @@ static void __init print_details(enum in
(caps & ARCH_CAPS_RSBA) ? " RSBA" : "",
(caps & ARCH_CAPS_SSB_NO) ? " SSB_NO" : "");
@@ -152,7 +146,7 @@
/* Settings for Xen's protection, irrespective of guests. */
printk(" Xen settings: BTI-Thunk %s, SPEC_CTRL: %s%s, Other:%s\n",
-@@ -242,6 +299,13 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+@@ -242,6 +299,13 @@ static void __init print_details(enum in
(default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-",
opt_ibpb ? " IBPB" : "");
@@ -166,7 +160,7 @@
/*
* Alternatives blocks for protecting against and/or virtualising
* mitigation support for guests.
-@@ -263,6 +327,10 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+@@ -263,6 +327,10 @@ static void __init print_details(enum in
printk(" XPTI (64-bit PV only): Dom0 %s, DomU %s\n",
opt_xpti & OPT_XPTI_DOM0 ? "enabled" : "disabled",
opt_xpti & OPT_XPTI_DOMU ? "enabled" : "disabled");
@@ -177,7 +171,7 @@
}
/* Calculate whether Retpoline is known-safe on this CPU. */
-@@ -786,6 +854,21 @@ void __init init_speculation_mitigations(void)
+@@ -786,6 +854,21 @@ void __init init_speculation_mitigations
l1tf_calculations(caps);
@@ -199,8 +193,6 @@
print_details(thunk, caps);
/*
-diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
-index d7e8ed0f5f..cdf5737dc2 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -38,6 +38,10 @@ extern int8_t opt_xpti;
++++++ 5b72fbbf-8-spec-ctrl-Introduce-an-option-to-control-L1D_FLUSH-for-HVM-HAP-guests.patch ++++++
--- /var/tmp/diff_new_pack.RbomEQ/_old 2018-09-18 11:39:20.408168512 +0200
+++ /var/tmp/diff_new_pack.RbomEQ/_new 2018-09-18 11:39:20.412168508 +0200
@@ -18,11 +18,9 @@
Reviewed-by: Jan Beulich
(cherry picked from commit 3bd36952dab60290f33d6791070b57920e10754b)
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index 158b5bb919..57ef18194a 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
-@@ -1789,7 +1789,8 @@ false disable the quirk workaround, which is also the default.
+@@ -1791,7 +1791,8 @@ false disable the quirk workaround, whic
### spec-ctrl (x86)
`= List of [ <bool>, xen=<bool>, {pv,hvm,msr-sc,rsb}=<bool>,
@@ -32,7 +30,7 @@
Controls for speculative execution sidechannel mitigations. By default, Xen
will pick the most appropriate mitigations based on compiled in support,
-@@ -1844,6 +1845,12 @@ from using fully eager FPU context switches. This is currently implemented as
+@@ -1846,6 +1847,12 @@ from using fully eager FPU context switc
a global control. By default, Xen will choose to use fully eager context
switches on hardware believed to speculate past #NM exceptions.
@@ -45,8 +43,6 @@
### sync\_console
`= <boolean>`
-diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
-index 30a33dd0bd..2ba0c40808 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -38,6 +38,7 @@
@@ -57,7 +53,7 @@
#include
#include
-@@ -1274,6 +1275,10 @@ static int construct_vmcs(struct vcpu *v)
+@@ -1274,6 +1275,10 @@ static int construct_vmcs(struct vcpu *v
vmx_vlapic_msr_changed(v);
@@ -68,8 +64,6 @@
out:
vmx_vmcs_exit(v);
-diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
-index 9bcc2b6adc..59baebb959 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -19,11 +19,13 @@
@@ -94,7 +88,7 @@
bool __initdata bsp_delay_spec_ctrl;
uint8_t __read_mostly default_xen_spec_ctrl;
-@@ -139,6 +142,7 @@ static int __init parse_spec_ctrl(const char *s)
+@@ -139,6 +142,7 @@ static int __init parse_spec_ctrl(const
opt_ibrs = 0;
opt_ibpb = false;
opt_ssbd = false;
@@ -102,7 +96,7 @@
}
else if ( val > 0 )
rc = -EINVAL;
-@@ -194,6 +198,8 @@ static int __init parse_spec_ctrl(const char *s)
+@@ -194,6 +198,8 @@ static int __init parse_spec_ctrl(const
opt_ssbd = val;
else if ( (val = parse_boolean("eager-fpu", s, ss)) >= 0 )
opt_eager_fpu = val;
@@ -111,7 +105,7 @@
else
rc = -EINVAL;
-@@ -290,7 +296,7 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+@@ -290,7 +296,7 @@ static void __init print_details(enum in
"\n");
/* Settings for Xen's protection, irrespective of guests. */
@@ -120,7 +114,7 @@
thunk == THUNK_NONE ? "N/A" :
thunk == THUNK_RETPOLINE ? "RETPOLINE" :
thunk == THUNK_LFENCE ? "LFENCE" :
-@@ -299,7 +305,8 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
+@@ -299,7 +305,8 @@ static void __init print_details(enum in
(default_xen_spec_ctrl & SPEC_CTRL_IBRS) ? "IBRS+" : "IBRS-",
!boot_cpu_has(X86_FEATURE_SSBD) ? "" :
(default_xen_spec_ctrl & SPEC_CTRL_SSBD) ? " SSBD+" : " SSBD-",
@@ -130,7 +124,7 @@
/* L1TF diagnostics, printed if vulnerable or PV shadowing is in use. */
if ( cpu_has_bug_l1tf || opt_pv_l1tf )
-@@ -871,6 +878,33 @@ void __init init_speculation_mitigations(void)
+@@ -871,6 +878,33 @@ void __init init_speculation_mitigations
opt_pv_l1tf = OPT_PV_L1TF_DOMU;
}
@@ -164,8 +158,6 @@
print_details(thunk, caps);
/*
-diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
-index cdf5737dc2..8f8aad40bb 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -29,6 +29,7 @@ void init_speculation_mitigations(void);
++++++ 5b72fbbf-x86-Make-spec-ctrl-no-a-global-disable-of-all-mitigations.patch ++++++
--- /var/tmp/diff_new_pack.RbomEQ/_old 2018-09-18 11:39:20.424168495 +0200
+++ /var/tmp/diff_new_pack.RbomEQ/_new 2018-09-18 11:39:20.424168495 +0200
@@ -15,11 +15,9 @@
Acked-by: Andrew Cooper
(cherry picked from commit d8800a82c3840b06b17672eddee4878bbfdacc6d)
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index 57ef18194a..0886706368 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
-@@ -1802,10 +1802,15 @@ extreme care.**
+@@ -1804,10 +1804,15 @@ extreme care.**
An overall boolean value, `spec-ctrl=no`, can be specified to turn off all
mitigations, including pieces of infrastructure used to virtualise certain
@@ -39,11 +37,9 @@
The booleans `pv=`, `hvm=`, `msr-sc=` and `rsb=` offer fine grained control
over the alternative blocks used by Xen. These impact Xen's ability to
-diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
-index 59baebb959..f0c50d6703 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
-@@ -134,6 +134,15 @@ static int __init parse_spec_ctrl(const char *s)
+@@ -134,6 +134,15 @@ static int __init parse_spec_ctrl(const
opt_eager_fpu = 0;
++++++ 5b74190e-x86-hvm-ioreq-MMIO-range-check-honor-DF.patch ++++++
# Commit 60a56dc0064a00830663ffe48215dcd080cb9504
# Date 2018-08-15 14:14:06 +0200
# Author Paul Durrant
# Committer Jan Beulich
x86/hvm/ioreq: MMIO range checking completely ignores direction flag
hvm_select_ioreq_server() is used to route an ioreq to the appropriate
ioreq server. For MMIO this is done by comparing the range of the ioreq
to the ranges registered by the device models of each ioreq server.
Unfortunately the calculation of the range of the ioreq completely ignores
the direction flag and thus may calculate the wrong range for comparison.
Thus the ioreq may either be routed to the wrong server or erroneously
terminated by null_ops.
NOTE: The patch also fixes whitespace in the switch statement to make it
style compliant.
Signed-off-by: Paul Durrant
Reviewed-by: Andrew Cooper
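The fix relies on the hvm_mmio_first_byte()/hvm_mmio_last_byte() helpers
visible in the hunk below. A rough standalone sketch of their semantics
(the struct and names are illustrative, not Xen's ioreq_t):

    #include <stdbool.h>
    #include <stdint.h>

    struct mmio_req {
        uint64_t addr;   /* address of the first repetition */
        uint32_t size;   /* bytes per repetition */
        uint32_t count;  /* number of repetitions */
        bool df;         /* direction flag: set => descending addresses */
    };

    /* With DF set the access moves downwards, so 'addr' is the highest rep. */
    static uint64_t mmio_first_byte(const struct mmio_req *p)
    {
        return p->df ? p->addr - (uint64_t)(p->count - 1) * p->size
                     : p->addr;
    }

    static uint64_t mmio_last_byte(const struct mmio_req *p)
    {
        return p->df ? p->addr + p->size - 1
                     : p->addr + (uint64_t)p->count * p->size - 1;
    }

    int main(void)
    {
        struct mmio_req p = { .addr = 0x2000, .size = 4, .count = 4,
                              .df = true };
        return mmio_first_byte(&p) != 0x1ff4 || mmio_last_byte(&p) != 0x2003;
    }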
--- a/xen/arch/x86/hvm/ioreq.c
+++ b/xen/arch/x86/hvm/ioreq.c
@@ -1353,20 +1353,25 @@ struct hvm_ioreq_server *hvm_select_iore
switch ( type )
{
- unsigned long end;
+ unsigned long start, end;
case XEN_DMOP_IO_RANGE_PORT:
- end = addr + p->size - 1;
- if ( rangeset_contains_range(r, addr, end) )
+ start = addr;
+ end = start + p->size - 1;
+ if ( rangeset_contains_range(r, start, end) )
return s;
break;
+
case XEN_DMOP_IO_RANGE_MEMORY:
- end = addr + (p->size * p->count) - 1;
- if ( rangeset_contains_range(r, addr, end) )
+ start = hvm_mmio_first_byte(p);
+ end = hvm_mmio_last_byte(p);
+
+ if ( rangeset_contains_range(r, start, end) )
return s;
break;
+
case XEN_DMOP_IO_RANGE_PCI:
if ( rangeset_contains_singleton(r, addr >> 32) )
{
++++++ 5b75afef-x86-setup-avoid-OoB-E820-lookup.patch ++++++
# Commit 3e4ec07e14bce81f6ae22c31ff1302d1f297a226
# Date 2018-08-16 18:10:07 +0100
# Author Andrew Cooper
# Committer Andrew Cooper
x86/setup: Avoid OoB E820 lookup when calculating the L1TF safe address
A number of corner cases (most obviously, no-real-mode and no Multiboot memory
map) can end up with e820_raw.nr_map being 0, at which point the L1TF
calculation will underflow.
Spotted by Coverity.
Signed-off-by: Andrew Cooper
Reviewed-by: Roger Pau Monné
Reviewed-by: Jan Beulich
Reviewed-by: Wei Liu
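The underflow in question is the usual unsigned off-by-one: with
e820_raw.nr_map equal to 0, an nr_map - 1 index wraps around. A minimal
sketch (illustrative, not the Xen code):

    #include <stdio.h>

    struct e820entry { unsigned long addr, size; };

    int main(void)
    {
        struct e820entry map[128];
        unsigned int nr_map = 0;

        /*
         * With an unsigned nr_map of 0, nr_map - 1 wraps to UINT_MAX,
         * so map[nr_map - 1] would be a wild out-of-bounds read.
         */
        printf("index would be %u\n", nr_map - 1);

        /* The guarded form, as in the patch: */
        if ( nr_map >= 1 )
            printf("last entry addr: %lx\n", map[nr_map - 1].addr);
        return 0;
    }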
--- a/xen/arch/x86/setup.c
+++ b/xen/arch/x86/setup.c
@@ -912,7 +912,7 @@ void __init noreturn __start_xen(unsigne
/* Sanitise the raw E820 map to produce a final clean version. */
max_page = raw_max_page = init_e820(memmap_type, &e820_raw);
- if ( !efi_enabled(EFI_BOOT) )
+ if ( !efi_enabled(EFI_BOOT) && e820_raw.nr_map >= 1 )
{
/*
* Supplement the heuristics in l1tf_calculations() by assuming that
++++++ 5b76b780-rangeset-inquiry-functions-tolerate-NULL.patch ++++++
# Commit ad0a9f273d6d6f0545cd9b708b2d4be581a6cadd
# Date 2018-08-17 13:54:40 +0200
# Author Jan Beulich
# Committer Jan Beulich
rangeset: make inquiry functions tolerate NULL inputs
Rather than special casing the ->iomem_caps check in x86's
get_page_from_l1e() for the dom_xen case, let's be more tolerant in
general, along the lines of rangeset_is_empty(): A never allocated
rangeset can't possibly contain or overlap any range.
Reported-by: Andrew Cooper
Signed-off-by: Jan Beulich
Reviewed-by: Roger Pau Monné
Reviewed-by: Wei Liu
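For illustration, a sketch of the caller-side simplification this buys,
with stand-in types (the real code lives in xen/common/rangeset.c and its
callers such as get_page_from_l1e()):

    #include <stdbool.h>
    #include <stddef.h>

    /* Stand-ins for the real rangeset machinery. */
    struct rangeset { int dummy; };

    static bool rangeset_contains_range(struct rangeset *r, unsigned long s,
                                        unsigned long e)
    {
        (void)s; (void)e;
        if ( r == NULL )   /* the new, tolerant behaviour: NULL is empty */
            return false;
        /* ... real implementation walks the range list under its lock ... */
        return false;
    }

    struct domain { struct rangeset *iomem_caps; };

    /* Before: callers had to special-case a never-allocated rangeset. */
    static bool allowed_old(const struct domain *d, unsigned long s,
                            unsigned long e)
    {
        return d->iomem_caps && rangeset_contains_range(d->iomem_caps, s, e);
    }

    /* After: the guard can simply go away. */
    static bool allowed_new(const struct domain *d, unsigned long s,
                            unsigned long e)
    {
        return rangeset_contains_range(d->iomem_caps, s, e);
    }

    int main(void)
    {
        struct domain d = { .iomem_caps = NULL };
        return allowed_old(&d, 1, 2) || allowed_new(&d, 1, 2); /* both false */
    }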
--- a/xen/common/rangeset.c
+++ b/xen/common/rangeset.c
@@ -256,6 +256,9 @@ bool_t rangeset_contains_range(
ASSERT(s <= e);
+ if ( !r )
+ return false;
+
read_lock(&r->lock);
x = find_range(r, s);
contains = (x && (x->e >= e));
@@ -272,6 +275,9 @@ bool_t rangeset_overlaps_range(
ASSERT(s <= e);
+ if ( !r )
+ return false;
+
read_lock(&r->lock);
x = find_range(r, e);
overlaps = (x && (s <= x->e));
++++++ 5b83c654-VT-d-dmar-iommu-mem-leak-fix.patch ++++++
# Commit fd07b6648c4c8891dca5bd0f7ef174b6831f80b2
# Date 2018-08-27 11:37:24 +0200
# Author Zhenzhong Duan
# Committer Jan Beulich
VT-d/dmar: iommu mem leak fix
Release memory allocated for drhd iommu in error path.
Signed-off-by: Zhenzhong Duan
Acked-by: Kevin Tian
--- a/xen/drivers/passthrough/vtd/dmar.c
+++ b/xen/drivers/passthrough/vtd/dmar.c
@@ -100,6 +100,7 @@ static void __init disable_all_dmar_unit
{
list_del(&drhd->list);
scope_devices_free(&drhd->scope);
+ iommu_free(drhd);
xfree(drhd);
}
list_for_each_entry_safe ( rmrr, _rmrr, &acpi_rmrr_units, list )
++++++ 5b8d5832-x86-assorted-array_index_nospec-insertions.patch ++++++
# Commit 3f2002614af51dfd507168a1696658bac91155ce
# Date 2018-09-03 17:50:10 +0200
# Author Jan Beulich
# Committer Jan Beulich
x86: assorted array_index_nospec() insertions
Don't chance having Spectre v1 (including BCBS) gadgets. In some of the
cases the insertions are more of a precautionary nature than there
provably being a gadget, but I think we should err on the safe (secure)
side here.
Signed-off-by: Jan Beulich
Reviewed-by: Paul Durrant
Acked-by: Razvan Cojocaru
Reviewed-by: Andrew Cooper
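A minimal sketch of the array_index_nospec() idiom the patch applies,
assuming a simplified branchless clamp (the real helper is implemented
more carefully, but the shape is the same: bounds-check architecturally,
then clamp the index so misspeculation cannot run it out of bounds):

    #include <stddef.h>
    #include <stdio.h>

    /* Stand-in for Xen's array_index_nospec(): clamp idx into [0, size). */
    static size_t index_nospec(size_t idx, size_t size)
    {
        size_t mask = (size_t)0 - (size_t)(idx < size); /* all-ones or 0 */
        return idx & mask;
    }

    static int lookup(const int *table, size_t nr, size_t idx)
    {
        if ( idx >= nr )                 /* architectural bounds check */
            return -1;
        idx = index_nospec(idx, nr);     /* speculation-safe clamp */
        return table[idx];
    }

    int main(void)
    {
        int t[4] = { 10, 11, 12, 13 };
        printf("%d %d\n", lookup(t, 4, 2), lookup(t, 4, 8)); /* 12 -1 */
        return 0;
    }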
--- a/xen/arch/x86/hvm/dm.c
+++ b/xen/arch/x86/hvm/dm.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include <xen/nospec.h>
#include
#include
@@ -232,7 +233,7 @@ static int set_mem_type(struct domain *d
struct xen_dm_op_set_mem_type *data)
{
xen_pfn_t last_pfn = data->first_pfn + data->nr - 1;
- unsigned int iter = 0;
+ unsigned int iter = 0, mem_type;
int rc = 0;
/* Interface types to internal p2m types */
@@ -252,7 +253,9 @@ static int set_mem_type(struct domain *d
unlikely(data->mem_type == HVMMEM_unused) )
return -EINVAL;
- if ( data->mem_type == HVMMEM_ioreq_server )
+ mem_type = array_index_nospec(data->mem_type, ARRAY_SIZE(memtype));
+
+ if ( mem_type == HVMMEM_ioreq_server )
{
unsigned int flags;
@@ -279,10 +282,10 @@ static int set_mem_type(struct domain *d
if ( p2m_is_shared(t) )
rc = -EAGAIN;
- else if ( !allow_p2m_type_change(t, memtype[data->mem_type]) )
+ else if ( !allow_p2m_type_change(t, memtype[mem_type]) )
rc = -EINVAL;
else
- rc = p2m_change_type_one(d, pfn, t, memtype[data->mem_type]);
+ rc = p2m_change_type_one(d, pfn, t, memtype[mem_type]);
put_gfn(d, pfn);
@@ -387,6 +390,8 @@ static int dm_op(const struct dmop_args
goto out;
}
+ op.op = array_index_nospec(op.op, ARRAY_SIZE(op_size));
+
if ( op_args->buf[0].size < offset + op_size[op.op] )
goto out;
@@ -739,7 +744,7 @@ int compat_dm_op(domid_t domid,
return -E2BIG;
args.domid = domid;
- args.nr_bufs = nr_bufs;
+ args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);
for ( i = 0; i < args.nr_bufs; i++ )
{
@@ -776,7 +781,7 @@ long do_dm_op(domid_t domid,
return -E2BIG;
args.domid = domid;
- args.nr_bufs = nr_bufs;
+ args.nr_bufs = array_index_nospec(nr_bufs, ARRAY_SIZE(args.buf) + 1);
if ( copy_from_guest_offset(&args.buf[0], bufs, 0, args.nr_bufs) )
return -EFAULT;
--- a/xen/arch/x86/hvm/hypercall.c
+++ b/xen/arch/x86/hvm/hypercall.c
@@ -20,6 +20,7 @@
*/
#include
#include
+#include <xen/nospec.h>
#include
@@ -181,8 +182,15 @@ int hvm_hypercall(struct cpu_user_regs *
BUILD_BUG_ON(ARRAY_SIZE(hvm_hypercall_table) >
ARRAY_SIZE(hypercall_args_table));
- if ( (eax >= ARRAY_SIZE(hvm_hypercall_table)) ||
- !hvm_hypercall_table[eax].native )
+ if ( eax >= ARRAY_SIZE(hvm_hypercall_table) )
+ {
+ regs->rax = -ENOSYS;
+ return HVM_HCALL_completed;
+ }
+
+ eax = array_index_nospec(eax, ARRAY_SIZE(hvm_hypercall_table));
+
+ if ( !hvm_hypercall_table[eax].native )
{
regs->rax = -ENOSYS;
return HVM_HCALL_completed;
--- a/xen/arch/x86/mm/mem_access.c
+++ b/xen/arch/x86/mm/mem_access.c
@@ -23,6 +23,7 @@
#include /* copy_from_guest() */
#include
+#include <xen/nospec.h>
#include
#include
#include
@@ -325,6 +326,7 @@ static bool xenmem_access_to_p2m_access(
switch ( xaccess )
{
case 0 ... ARRAY_SIZE(memaccess) - 1:
+ xaccess = array_index_nospec(xaccess, ARRAY_SIZE(memaccess));
*paccess = memaccess[xaccess];
break;
case XENMEM_access_default:
--- a/xen/arch/x86/pv/hypercall.c
+++ b/xen/arch/x86/pv/hypercall.c
@@ -21,6 +21,7 @@
#include
#include
+#include <xen/nospec.h>
#include
#define HYPERCALL(x) \
@@ -99,8 +100,15 @@ void pv_hypercall(struct cpu_user_regs *
BUILD_BUG_ON(ARRAY_SIZE(pv_hypercall_table) >
ARRAY_SIZE(hypercall_args_table));
- if ( (eax >= ARRAY_SIZE(pv_hypercall_table)) ||
- !pv_hypercall_table[eax].native )
+ if ( eax >= ARRAY_SIZE(pv_hypercall_table) )
+ {
+ regs->rax = -ENOSYS;
+ return;
+ }
+
+ eax = array_index_nospec(eax, ARRAY_SIZE(pv_hypercall_table));
+
+ if ( !pv_hypercall_table[eax].native )
{
regs->rax = -ENOSYS;
return;
++++++ 5b8fae26-tools-libxl-correct-vcpu-affinity-output-with-sparse-physical-cpu-map.patch ++++++
Subject: tools/libxl: correct vcpu affinity output with sparse physical cpu map
From: Juergen Gross <jgross@suse.com> Fri Aug 31 17:22:04 2018 +0200
Date: Wed Sep 5 11:21:26 2018 +0100:
Git: 2ec5339ec9218fbf1583fa85b74d1d2f15f1b3b8
With not all physical cpus online (e.g. with smt=0) the output of the
vcpu affinities is wrong, as the affinity bitmaps are capped after
nr_cpus bits, instead of using max_cpu_id.
Signed-off-by: Juergen Gross
Acked-by: Wei Liu
diff --git a/tools/xl/xl_vcpu.c b/tools/xl/xl_vcpu.c
index 7b7a93d..aef4868 100644
--- a/tools/xl/xl_vcpu.c
+++ b/tools/xl/xl_vcpu.c
@@ -144,13 +144,13 @@ static void vcpulist(int argc, char **argv)
}
for (; argc > 0; ++argv, --argc) {
uint32_t domid = find_domain(*argv);
- print_domain_vcpuinfo(domid, physinfo.nr_cpus);
+ print_domain_vcpuinfo(domid, physinfo.max_cpu_id + 1);
}
}
vcpulist_out:
++++++ 5b8fae26-xen-fill-topology-info-for-all-present-cpus.patch ++++++
Subject: xen: fill topology info for all present cpus
From: Juergen Gross <jgross@suse.com> Fri Aug 31 17:22:05 2018 +0200
Date: Wed Sep 5 11:21:26 2018 +0100:
Git: 780e2d309812e54353259bb9e6c28886e994b065
The topology information obtainable via XEN_SYSCTL_cputopoinfo is
filled rather weirdly: the size of the array is derived from the highest
online cpu number, so in case there are trailing offline cpus they
will not be included.
On a dual-core system with 4 threads booted with smt=0, without this
patch xl info -n will print:
cpu_topology           :
cpu:    core    socket    node
  0:       0         0       0
  1:       0         0       0
  2:       1         0       0
while with this patch the output is:
cpu_topology           :
cpu:    core    socket    node
  0:       0         0       0
  1:       0         0       0
  2:       1         0       0
  3:       1         0       0
Signed-off-by: Juergen Gross
Reviewed-by: Wei Liu
Reviewed-by: Jan Beulich
Acked-by: Julien Grall
diff --git a/xen/common/sysctl.c b/xen/common/sysctl.c
index 8e83c33..c0aa6bd 100644
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -349,7 +349,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
unsigned int i, num_cpus;
struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
- num_cpus = cpumask_last(&cpu_online_map) + 1;
+ num_cpus = cpumask_last(&cpu_present_map) + 1;
if ( !guest_handle_is_null(ti->cputopo) )
{
struct xen_sysctl_cputopo cputopo = { };
++++++ 5b8fb5af-tools-xl-refuse-to-set-number-of-vcpus-to-0-via-xl-vcpu-set.patch ++++++
Subject: tools/xl: refuse to set number of vcpus to 0 via xl vcpu-set
From: Juergen Gross <jgross@suse.com> Mon Sep 3 14:59:42 2018 +0200
Date: Wed Sep 5 11:53:35 2018 +0100:
Git: 2c0b1824b1cb33a2610f3f55299247f9e0464466
Trying to set the number of vcpus of a domain to 0 isn't refused.
We should not allow that.
Signed-off-by: Juergen Gross
Acked-by: Wei Liu
Signed-off-by: Wei Liu
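The xl side of the change below uses the usual strtoul() validation
idiom: parse, check the end pointer, and range-check before narrowing.
A slightly stricter standalone sketch (illustrative, not the xl code):

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Parse a vcpu count; returns -1 on anything unusable. */
    static int parse_vcpus(const char *arg)
    {
        char *endptr;
        unsigned long n = strtoul(arg, &endptr, 10);

        if ( arg == endptr || *endptr != '\0' || n == 0 || n > INT_MAX )
            return -1;
        return (int)n;
    }

    int main(int argc, char **argv)
    {
        int n = argc > 1 ? parse_vcpus(argv[1]) : -1;

        if ( n < 0 )
            fprintf(stderr, "Error: Invalid argument.\n");
        else
            printf("vcpus = %d\n", n);
        return n < 0;
    }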
diff --git a/tools/libxl/libxl_domain.c b/tools/libxl/libxl_domain.c
index 533bcdf..3377bba 100644
--- a/tools/libxl/libxl_domain.c
+++ b/tools/libxl/libxl_domain.c
@@ -1369,6 +1369,12 @@ int libxl_set_vcpuonline(libxl_ctx *ctx, uint32_t domid, libxl_bitmap *cpumap)
}
maxcpus = libxl_bitmap_count_set(cpumap);
+ if (maxcpus == 0)
+ {
+ LOGED(ERROR, domid, "Requested 0 VCPUs!");
+ rc = ERROR_FAIL;
+ goto out;
+ }
if (maxcpus > info.vcpu_max_id + 1)
{
LOGED(ERROR, domid, "Requested %d VCPUs, however maxcpus is %d!",
diff --git a/tools/xl/xl_vcpu.c b/tools/xl/xl_vcpu.c
index aef4868..71d3a5c 100644
--- a/tools/xl/xl_vcpu.c
+++ b/tools/xl/xl_vcpu.c
@@ -13,6 +13,7 @@
*/
#include
+#include <limits.h>
#include
#include
@@ -331,13 +332,14 @@ int main_vcpupin(int argc, char **argv)
static int vcpuset(uint32_t domid, const char* nr_vcpus, int check_host)
{
char *endptr;
- unsigned int max_vcpus, i;
+ unsigned int i;
+ unsigned long max_vcpus;
libxl_bitmap cpumap;
int rc;
libxl_bitmap_init(&cpumap);
max_vcpus = strtoul(nr_vcpus, &endptr, 10);
- if (nr_vcpus == endptr) {
+ if (nr_vcpus == endptr || max_vcpus > INT_MAX) {
fprintf(stderr, "Error: Invalid argument.\n");
return 1;
}
@@ -358,7 +360,7 @@ static int vcpuset(uint32_t domid, const char* nr_vcpus, int check_host)
if (max_vcpus > online_vcpus && max_vcpus > host_cpu) {
fprintf(stderr, "You are overcommmitting! You have %d physical" \
- " CPUs and want %d vCPUs! Aborting, use --ignore-host to" \
+ " CPUs and want %ld vCPUs! Aborting, use --ignore-host to" \
" continue\n", host_cpu, max_vcpus);
return 1;
}
@@ -375,7 +377,7 @@ static int vcpuset(uint32_t domid, const char* nr_vcpus, int check_host)
if (rc == ERROR_DOMAIN_NOTFOUND)
fprintf(stderr, "Domain %u does not exist.\n", domid);
else if (rc)
- fprintf(stderr, "libxl_set_vcpuonline failed domid=%u max_vcpus=%d," \
+ fprintf(stderr, "libxl_set_vcpuonline failed domid=%u max_vcpus=%ld," \
" rc: %d\n", domid, max_vcpus, rc);
libxl_bitmap_dispose(&cpumap);
++++++ 5b9784ad-x86-HVM-drop-hvm_fetch_from_guest_linear.patch ++++++
References: bsc#1094508
# Commit d9067986c93b14371056bd25507ac9606e86c962
# Date 2018-09-11 11:02:37 +0200
# Author Jan Beulich
# Committer Jan Beulich
x86/HVM: drop hvm_fetch_from_guest_linear()
It can easily be expressed through hvm_copy_from_guest_linear(), and in
two cases this even simplifies callers.
Suggested-by: Paul Durrant
Signed-off-by: Jan Beulich
Reviewed-by: Andrew Cooper
Tested-by: Olaf Hering
Reviewed-by: Paul Durrant
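The equivalence the commit message relies on can be sketched in a few
lines: a fetch is just a copy with the instruction-fetch PFEC bit OR'd
into the walk flags (stand-in function, not literal Xen code):

    #include <stdint.h>

    /* x86 page-fault error-code bits (architectural values). */
    #define PFEC_page_present (1u << 0)
    #define PFEC_insn_fetch   (1u << 4)

    /* Stand-in for hvm_copy_from_guest_linear(); illustrative only. */
    static int copy_from_guest_linear(void *buf, unsigned long addr,
                                      int size, uint32_t pfec)
    {
        (void)buf; (void)addr; (void)size; (void)pfec;
        return 0; /* HVMTRANS_okay */
    }

    /* The dropped wrapper amounted to nothing more than this: */
    static int fetch_from_guest_linear(void *buf, unsigned long addr,
                                       int size, uint32_t pfec)
    {
        return copy_from_guest_linear(buf, addr, size,
                                      pfec | PFEC_insn_fetch);
    }

    int main(void)
    {
        char buf[4];
        return fetch_from_guest_linear(buf, 0x1000, sizeof(buf),
                                       PFEC_page_present);
    }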
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1046,6 +1046,8 @@ static int __hvmemul_read(
pfec |= PFEC_implicit;
else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
pfec |= PFEC_user_mode;
+ if ( access_type == hvm_access_insn_fetch )
+ pfec |= PFEC_insn_fetch;
rc = hvmemul_virtual_to_linear(
seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
@@ -1057,9 +1059,7 @@ static int __hvmemul_read(
(vio->mmio_gla == (addr & PAGE_MASK)) )
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
- rc = ((access_type == hvm_access_insn_fetch) ?
- hvm_fetch_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo) :
- hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo));
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
switch ( rc )
{
@@ -2498,9 +2498,10 @@ void hvm_emulate_init_per_insn(
hvm_access_insn_fetch,
&hvmemul_ctxt->seg_reg[x86_seg_cs],
&addr) &&
- hvm_fetch_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
- sizeof(hvmemul_ctxt->insn_buf),
- pfec, NULL) == HVMTRANS_okay) ?
+ hvm_copy_from_guest_linear(hvmemul_ctxt->insn_buf, addr,
+ sizeof(hvmemul_ctxt->insn_buf),
+ pfec | PFEC_insn_fetch,
+ NULL) == HVMTRANS_okay) ?
sizeof(hvmemul_ctxt->insn_buf) : 0;
}
else
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3314,15 +3314,6 @@ enum hvm_translation_result hvm_copy_fro
PFEC_page_present | pfec, pfinfo);
}
-enum hvm_translation_result hvm_fetch_from_guest_linear(
- void *buf, unsigned long addr, int size, uint32_t pfec,
- pagefault_info_t *pfinfo)
-{
- return __hvm_copy(buf, addr, size, current,
- HVMCOPY_from_guest | HVMCOPY_linear,
- PFEC_page_present | PFEC_insn_fetch | pfec, pfinfo);
-}
-
unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
{
int rc;
@@ -3760,16 +3751,16 @@ void hvm_ud_intercept(struct cpu_user_re
if ( opt_hvm_fep )
{
const struct segment_register *cs = &ctxt.seg_reg[x86_seg_cs];
- uint32_t walk = (ctxt.seg_reg[x86_seg_ss].dpl == 3)
- ? PFEC_user_mode : 0;
+ uint32_t walk = ((ctxt.seg_reg[x86_seg_ss].dpl == 3)
+ ? PFEC_user_mode : 0) | PFEC_insn_fetch;
unsigned long addr;
char sig[5]; /* ud2; .ascii "xen" */
if ( hvm_virtual_to_linear_addr(x86_seg_cs, cs, regs->rip,
sizeof(sig), hvm_access_insn_fetch,
cs, &addr) &&
- (hvm_fetch_from_guest_linear(sig, addr, sizeof(sig),
- walk, NULL) == HVMTRANS_okay) &&
+ (hvm_copy_from_guest_linear(sig, addr, sizeof(sig),
+ walk, NULL) == HVMTRANS_okay) &&
(memcmp(sig, "\xf\xbxen", sizeof(sig)) == 0) )
{
regs->rip += sizeof(sig);
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -203,10 +203,10 @@ hvm_read(enum x86_segment seg,
if ( rc || !bytes )
return rc;
- if ( access_type == hvm_access_insn_fetch )
- rc = hvm_fetch_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
- else
- rc = hvm_copy_from_guest_linear(p_data, addr, bytes, 0, &pfinfo);
+ rc = hvm_copy_from_guest_linear(p_data, addr, bytes,
+ (access_type == hvm_access_insn_fetch
+ ? PFEC_insn_fetch : 0),
+ &pfinfo);
switch ( rc )
{
@@ -418,8 +418,9 @@ const struct x86_emulate_ops *shadow_ini
(!hvm_translate_virtual_addr(
x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_linear(
- sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+ !hvm_copy_from_guest_linear(
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+ PFEC_insn_fetch, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
return &hvm_shadow_emulator_ops;
@@ -447,8 +448,9 @@ void shadow_continue_emulation(struct sh
(!hvm_translate_virtual_addr(
x86_seg_cs, regs->rip, sizeof(sh_ctxt->insn_buf),
hvm_access_insn_fetch, sh_ctxt, &addr) &&
- !hvm_fetch_from_guest_linear(
- sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf), 0, NULL))
+ !hvm_copy_from_guest_linear(
+ sh_ctxt->insn_buf, addr, sizeof(sh_ctxt->insn_buf),
+ PFEC_insn_fetch, NULL))
? sizeof(sh_ctxt->insn_buf) : 0;
sh_ctxt->insn_buf_eip = regs->rip;
}
--- a/xen/include/asm-x86/hvm/support.h
+++ b/xen/include/asm-x86/hvm/support.h
@@ -100,9 +100,6 @@ enum hvm_translation_result hvm_copy_to_
enum hvm_translation_result hvm_copy_from_guest_linear(
void *buf, unsigned long addr, int size, uint32_t pfec,
pagefault_info_t *pfinfo);
-enum hvm_translation_result hvm_fetch_from_guest_linear(
- void *buf, unsigned long addr, int size, uint32_t pfec,
- pagefault_info_t *pfinfo);
/*
* Get a reference on the page under an HVM physical or linear address. If
++++++ 5b9784d2-x86-HVM-add-known_gla-helper.patch ++++++
References: bsc#1094508
# Commit 9f232721deaeb9f56eeffb555c4b7ecd62708667
# Date 2018-09-11 11:03:14 +0200
# Author Jan Beulich
# Committer Jan Beulich
x86/HVM: add known_gla() emulation helper
... as a central place to check whether the translation for the linear
address is available as well as usable.
Signed-off-by: Jan Beulich
Reviewed-by: Paul Durrant
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1027,6 +1027,26 @@ static inline int hvmemul_linear_mmio_wr
pfec, hvmemul_ctxt, translate);
}
+static bool known_gla(unsigned long addr, unsigned int bytes, uint32_t pfec)
+{
+ const struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
+
+ if ( pfec & PFEC_write_access )
+ {
+ if ( !vio->mmio_access.write_access )
+ return false;
+ }
+ else if ( pfec & PFEC_insn_fetch )
+ {
+ if ( !vio->mmio_access.insn_fetch )
+ return false;
+ }
+ else if ( !vio->mmio_access.read_access )
+ return false;
+
+ return vio->mmio_gla == (addr & PAGE_MASK);
+}
+
static int __hvmemul_read(
enum x86_segment seg,
unsigned long offset,
@@ -1035,11 +1055,9 @@ static int __hvmemul_read(
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
- struct vcpu *curr = current;
pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
int rc;
if ( is_x86_system_segment(seg) )
@@ -1053,10 +1071,7 @@ static int __hvmemul_read(
seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
- if ( ((access_type != hvm_access_insn_fetch
- ? vio->mmio_access.read_access
- : vio->mmio_access.insn_fetch)) &&
- (vio->mmio_gla == (addr & PAGE_MASK)) )
+ if ( known_gla(addr, bytes, pfec) )
return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
@@ -1157,10 +1172,8 @@ static int hvmemul_write(
{
struct hvm_emulate_ctxt *hvmemul_ctxt =
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
- struct vcpu *curr = current;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &curr->arch.hvm_vcpu.hvm_io;
int rc;
void *mapping;
@@ -1174,8 +1187,7 @@ static int hvmemul_write(
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
- if ( vio->mmio_access.write_access &&
- (vio->mmio_gla == (addr & PAGE_MASK)) )
+ if ( known_gla(addr, bytes, pfec) )
return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
@@ -1204,7 +1216,6 @@ static int hvmemul_rmw(
container_of(ctxt, struct hvm_emulate_ctxt, ctxt);
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
- struct hvm_vcpu_io *vio = &current->arch.hvm_vcpu.hvm_io;
int rc;
void *mapping;
@@ -1230,8 +1241,7 @@ static int hvmemul_rmw(
else
{
unsigned long data = 0;
- bool known_gpfn = vio->mmio_access.write_access &&
- vio->mmio_gla == (addr & PAGE_MASK);
+ bool known_gpfn = known_gla(addr, bytes, pfec);
if ( bytes > sizeof(data) )
return X86EMUL_UNHANDLEABLE;
++++++ 5b9784f2-x86-HVM-split-page-straddling-accesses.patch ++++++
References: bsc#1094508
# Commit 3bdec530a5f50b212aa5fd05d97e7349e8bdba82
# Date 2018-09-11 11:03:46 +0200
# Author Jan Beulich
# Committer Jan Beulich
x86/HVM: split page straddling emulated accesses in more cases
Assuming consecutive linear addresses map to all RAM or all MMIO is not
correct. Nor is assuming that a page straddling MMIO access will access
the same emulating component for both parts of the access. If a guest
RAM read fails with HVMTRANS_bad_gfn_to_mfn and if the access straddles
a page boundary, issue accesses separately for both parts.
The extra call to known_gla() from hvmemul_write() is just to preserve
original behavior; for consistency the check also gets added to
hvmemul_rmw() (albeit I continue to be unsure whether we wouldn't better
drop both).
Note that the correctness of this depends on the MMIO caching used
elsewhere in the emulation code.
Signed-off-by: Jan Beulich
Tested-by: Olaf Hering
Reviewed-by: Paul Durrant
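The splitting follows the generic straddle pattern: handle the part up to
the page boundary, then recurse on the remainder, the same shape as the
linear_read()/linear_write() helpers added below. A standalone sketch
under an assumed 4KiB page size (stand-in handler, not Xen code):

    #include <stdint.h>

    #define PAGE_SIZE 4096u

    /* Stand-in for a single-page access handler (an MMIO read, say). */
    static int access_one_page(unsigned long addr, unsigned int bytes,
                               uint8_t *p_data)
    {
        (void)addr; (void)bytes; (void)p_data;
        return 0; /* X86EMUL_OKAY */
    }

    /* Split an access at page boundaries so each chunk stays in one page. */
    static int access_linear(unsigned long addr, unsigned int bytes,
                             uint8_t *p_data)
    {
        unsigned int offset = addr & (PAGE_SIZE - 1);

        if ( offset + bytes <= PAGE_SIZE )
            return access_one_page(addr, bytes, p_data);

        /* Leading part, up to the page boundary... */
        unsigned int part1 = PAGE_SIZE - offset;
        int rc = access_linear(addr, part1, p_data);

        /* ...then the remainder, which may straddle further pages. */
        if ( rc == 0 )
            rc = access_linear(addr + part1, bytes - part1, p_data + part1);
        return rc;
    }

    int main(void)
    {
        uint8_t buf[8192];
        return access_linear(0x1ffc, sizeof(buf) / 2, buf); /* straddles */
    }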
--- a/xen/arch/x86/hvm/emulate.c
+++ b/xen/arch/x86/hvm/emulate.c
@@ -1044,7 +1044,91 @@ static bool known_gla(unsigned long addr
else if ( !vio->mmio_access.read_access )
return false;
- return vio->mmio_gla == (addr & PAGE_MASK);
+ return (vio->mmio_gla == (addr & PAGE_MASK) &&
+ (addr & ~PAGE_MASK) + bytes <= PAGE_SIZE);
+}
+
+static int linear_read(unsigned long addr, unsigned int bytes, void *p_data,
+ uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+ pagefault_info_t pfinfo;
+ int rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
+
+ switch ( rc )
+ {
+ unsigned int offset, part1;
+
+ case HVMTRANS_okay:
+ return X86EMUL_OKAY;
+
+ case HVMTRANS_bad_linear_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+ return X86EMUL_EXCEPTION;
+
+ case HVMTRANS_bad_gfn_to_mfn:
+ if ( pfec & PFEC_insn_fetch )
+ return X86EMUL_UNHANDLEABLE;
+
+ offset = addr & ~PAGE_MASK;
+ if ( offset + bytes <= PAGE_SIZE )
+ return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec,
+ hvmemul_ctxt,
+ known_gla(addr, bytes, pfec));
+
+ /* Split the access at the page boundary. */
+ part1 = PAGE_SIZE - offset;
+ rc = linear_read(addr, part1, p_data, pfec, hvmemul_ctxt);
+ if ( rc == X86EMUL_OKAY )
+ rc = linear_read(addr + part1, bytes - part1, p_data + part1,
+ pfec, hvmemul_ctxt);
+ return rc;
+
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
+ return X86EMUL_RETRY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
+}
+
+static int linear_write(unsigned long addr, unsigned int bytes, void *p_data,
+ uint32_t pfec, struct hvm_emulate_ctxt *hvmemul_ctxt)
+{
+ pagefault_info_t pfinfo;
+ int rc = hvm_copy_to_guest_linear(addr, p_data, bytes, pfec, &pfinfo);
+
+ switch ( rc )
+ {
+ unsigned int offset, part1;
+
+ case HVMTRANS_okay:
+ return X86EMUL_OKAY;
+
+ case HVMTRANS_bad_linear_to_gfn:
+ x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
+ return X86EMUL_EXCEPTION;
+
+ case HVMTRANS_bad_gfn_to_mfn:
+ offset = addr & ~PAGE_MASK;
+ if ( offset + bytes <= PAGE_SIZE )
+ return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec,
+ hvmemul_ctxt,
+ known_gla(addr, bytes, pfec));
+
+ /* Split the access at the page boundary. */
+ part1 = PAGE_SIZE - offset;
+ rc = linear_write(addr, part1, p_data, pfec, hvmemul_ctxt);
+ if ( rc == X86EMUL_OKAY )
+ rc = linear_write(addr + part1, bytes - part1, p_data + part1,
+ pfec, hvmemul_ctxt);
+ return rc;
+
+ case HVMTRANS_gfn_paged_out:
+ case HVMTRANS_gfn_shared:
+ return X86EMUL_RETRY;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
}
static int __hvmemul_read(
@@ -1055,7 +1139,6 @@ static int __hvmemul_read(
enum hvm_access_type access_type,
struct hvm_emulate_ctxt *hvmemul_ctxt)
{
- pagefault_info_t pfinfo;
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present;
int rc;
@@ -1071,31 +1154,8 @@ static int __hvmemul_read(
seg, offset, bytes, &reps, access_type, hvmemul_ctxt, &addr);
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
- if ( known_gla(addr, bytes, pfec) )
- return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
- rc = hvm_copy_from_guest_linear(p_data, addr, bytes, pfec, &pfinfo);
-
- switch ( rc )
- {
- case HVMTRANS_okay:
- break;
- case HVMTRANS_bad_linear_to_gfn:
- x86_emul_pagefault(pfinfo.ec, pfinfo.linear, &hvmemul_ctxt->ctxt);
- return X86EMUL_EXCEPTION;
- case HVMTRANS_bad_gfn_to_mfn:
- if ( access_type == hvm_access_insn_fetch )
- return X86EMUL_UNHANDLEABLE;
-
- return hvmemul_linear_mmio_read(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
- case HVMTRANS_gfn_paged_out:
- case HVMTRANS_gfn_shared:
- return X86EMUL_RETRY;
- default:
- return X86EMUL_UNHANDLEABLE;
- }
-
- return X86EMUL_OKAY;
+ return linear_read(addr, bytes, p_data, pfec, hvmemul_ctxt);
}
static int hvmemul_read(
@@ -1175,7 +1235,7 @@ static int hvmemul_write(
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
int rc;
- void *mapping;
+ void *mapping = NULL;
if ( is_x86_system_segment(seg) )
pfec |= PFEC_implicit;
@@ -1187,15 +1247,15 @@ static int hvmemul_write(
if ( rc != X86EMUL_OKAY || !bytes )
return rc;
- if ( known_gla(addr, bytes, pfec) )
- return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 1);
-
- mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
- if ( IS_ERR(mapping) )
- return ~PTR_ERR(mapping);
+ if ( !known_gla(addr, bytes, pfec) )
+ {
+ mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+ if ( IS_ERR(mapping) )
+ return ~PTR_ERR(mapping);
+ }
if ( !mapping )
- return hvmemul_linear_mmio_write(addr, bytes, p_data, pfec, hvmemul_ctxt, 0);
+ return linear_write(addr, bytes, p_data, pfec, hvmemul_ctxt);
memcpy(mapping, p_data, bytes);
@@ -1217,7 +1277,7 @@ static int hvmemul_rmw(
unsigned long addr, reps = 1;
uint32_t pfec = PFEC_page_present | PFEC_write_access;
int rc;
- void *mapping;
+ void *mapping = NULL;
rc = hvmemul_virtual_to_linear(
seg, offset, bytes, &reps, hvm_access_write, hvmemul_ctxt, &addr);
@@ -1229,9 +1289,12 @@ static int hvmemul_rmw(
else if ( hvmemul_ctxt->seg_reg[x86_seg_ss].dpl == 3 )
pfec |= PFEC_user_mode;
- mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
- if ( IS_ERR(mapping) )
- return ~PTR_ERR(mapping);
+ if ( !known_gla(addr, bytes, pfec) )
+ {
+ mapping = hvmemul_map_linear_addr(addr, bytes, pfec, hvmemul_ctxt);
+ if ( IS_ERR(mapping) )
+ return ~PTR_ERR(mapping);
+ }
if ( mapping )
{
@@ -1241,17 +1304,14 @@ static int hvmemul_rmw(
else
{
unsigned long data = 0;
- bool known_gpfn = known_gla(addr, bytes, pfec);
if ( bytes > sizeof(data) )
return X86EMUL_UNHANDLEABLE;
- rc = hvmemul_linear_mmio_read(addr, bytes, &data, pfec, hvmemul_ctxt,
- known_gpfn);
+ rc = linear_read(addr, bytes, &data, pfec, hvmemul_ctxt);
if ( rc == X86EMUL_OKAY )
rc = x86_emul_rmw(&data, bytes, eflags, state, ctxt);
if ( rc == X86EMUL_OKAY )
- rc = hvmemul_linear_mmio_write(addr, bytes, &data, pfec,
- hvmemul_ctxt, known_gpfn);
+ rc = linear_write(addr, bytes, &data, pfec, hvmemul_ctxt);
}
return rc;