Hello community,
here is the log from the commit of package xen.1952 for openSUSE:12.2:Update checked in at 2013-08-30 15:48:42
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:12.2:Update/xen.1952 (Old)
and /work/SRC/openSUSE:12.2:Update/.xen.1952.new (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "xen.1952"
Changes:
--------
New Changes file:
--- /dev/null 2013-07-23 23:44:04.804033756 +0200
+++ /work/SRC/openSUSE:12.2:Update/.xen.1952.new/xen.changes 2013-08-30 15:48:54.000000000 +0200
@@ -0,0 +1,6935 @@
+-------------------------------------------------------------------
+Thu Aug 8 08:26:07 MDT 2013 - carnold@suse.com
+
+- bnc#824676 - L3: Failed to setup devices for vm instance when
+ start multiple vms simultaneously
+ blktap-close-fifos.patch
+
+-------------------------------------------------------------------
+Wed Aug 7 09:35:15 MDT 2013 - carnold@suse.com
+
+- bnc#XXXXXX - VUL-0: xen: CVE-2013-XXXX: XSA-61: suppress device
+ assignment to HVM guest when there is no IOMMU
+ 27204-libxl-suppress-device-assignment-to-HVM-guest-without-IOMMU.patch
+- Upstream patches from Jan
+ 27127-x86-HVM-fix-x2APIC-APIC_ID-read-emulation.patch
+ 27128-x86-HVM-fix-initialization-of-wallclock-time-for-PVHVM-on-migration.patch
+ 27168-x86-hvm-fix-HVMOP_inject_trap-return-value-on-success.patch
+ 27178-libxl-Restrict-permissions-on-PV-console-device-xenstore-nodes.patch (Replaces CVE-2013-2211-xsa57.patch)
+ 27181-x86-fix-page-refcount-handling-in-page-table-pin-error-path.patch (Replaces CVE-2013-1432-xsa58.patch)
+ 27197-AMD-intremap-Prevent-use-of-per-device-vector-maps.patch
+ 27212-x86-don-t-pass-negative-time-to-gtime_to_gtsc-try-2.patch
+ 27213-iommu-amd-Fix-logic-for-clearing-the-IOMMU-interrupt-bits.patch
+ 27214-iommu-amd-Workaround-for-erratum-787.patch
+ 27221-x86-mm-Ensure-useful-progress-in-alloc_l2_table.patch
+ 27248-x86-cpuidle-Change-logging-for-unknown-APIC-IDs.patch
+ 27263-x86-time-Update-wallclock-in-shared-info-when-altering-domain-time-offset.patch
+ 27321-x86-refine-FPU-selector-handling-code-for-XSAVEOPT.patch
+ x86-xsa21-emuirq-fix.patch
+
+-------------------------------------------------------------------
+Fri Jul 12 20:49:39 CEST 2013 - ohering@suse.de
+
+- bnc#823786 - migrate.py support of short options dropped by PTF
+- bnc#803712 - after live migration rcu_sched_state detected stalls
+ add new option xm migrate --min_remaing <num>
+ xen.migrate.tools_set_number_of_dirty_pages_during_migration.patch
+
+-------------------------------------------------------------------
+Fri Jun 14 09:39:37 MDT 2013 - carnold@suse.com
+
+- bnc#826882 - VUL-0: xen: XSA-58: x86: fix page refcount handling
+ in page table pin error path
+ CVE-2013-1432-xsa58.patch
+- bnc#823608 - VUL-0: xen: XSA-57: libxl allows guest write access to
+ sensitive console related xenstore keys
+ CVE-2013-2211-xsa57.patch
+- bnc#823011 - VUL-0: xen: XSA-55: Multiple vulnerabilities in
+ libelf PV kernel handling
+ 27141-libelf-abolish-libelf-relocate.c.patch
+ 27142-libxc-introduce-xc_dom_seg_to_ptr_pages.patch
+ 27143-libxc-Fix-range-checking-in-xc_dom_pfn_to_ptr-etc.patch
+ 27145-libelf-abolish-elf_sval-and-elf_access_signed.patch
+ 27147-libelf-xc_dom_load_elf_symtab-Do-not-use-syms-uninit.patch
+ 27148-libelf-introduce-macros-for-memory-access-and-pointe.patch
+ 27149-tools-xcutils-readnotes-adjust-print_l1_mfn_valid_no.patch
+ 27150-libelf-check-nul-terminated-strings-properly.patch
+ 27151-libelf-check-all-pointer-accesses.patch
+ 27152-libelf-Check-pointer-references-in-elf_is_elfbinary.patch
+ 27153-libelf-Make-all-callers-call-elf_check_broken.patch
+ 27154-libelf-use-C99-bool-for-booleans.patch
+ 27155-libelf-use-only-unsigned-integers.patch
+ 27156-libelf-check-loops-for-running-away.patch
+ 27157-libelf-abolish-obsolete-macros.patch
+ 27158-libxc-Add-range-checking-to-xc_dom_binloader.patch
+ 27159-libxc-check-failure-of-xc_dom_-_to_ptr-xc_map_foreig.patch
+ 27160-libxc-check-return-values-from-malloc.patch
+ 27161-libxc-range-checks-in-xc_dom_p2m_host-and-_guest.patch
+ 27162-libxc-check-blob-size-before-proceeding-in-xc_dom_ch.patch
+- Upstream patches from Jan
+ 27085-x86-fix-boot-time-APIC-mode-detection.patch
+ 27093-libxc-limit-cpu-values-when-setting-vcpu-affinity.patch (Replaces CVE-2013-2072-xsa56.patch)
+ 27112-x86-fix-ordering-of-operations-in-destroy_irq.patch
+ 27117-x86-MCE-disable-if-MCE-banks-are-not-present.patch
+ 27118-x86-xsave-fix-information-leak-on-AMD-CPUs.patch (Replaces CVE-2013-2076-xsa52.patch)
+ 27119-x86-xsave-recover-from-faults-on-XRSTOR.patch (Replaces CVE-2013-2077-xsa53.patch)
+ 27120-x86-xsave-properly-check-guest-input-to-XSETBV.patch (Replaces CVE-2013-2078-xsa54.patch)
+ 27121-x86-preserve-FPU-selectors-for-32-bit-guest-code.patch
+ 27122-x86-fix-XCR0-handling.patch
+ 27123-x86-vtsc-update-vcpu_time-in-hvm_set_guest_time.patch
+- Dropped 27083-AMD-iommu-SR56x0-Erratum-64-Reset-all-head-tail-pointers.patch
+
+-------------------------------------------------------------------
+Fri May 31 09:40:59 MDT 2013 - carnold@suse.com
+
+- bnc#801663 - performance of mirror lvm unsuitable for production
+ block-dmmd
+
+-------------------------------------------------------------------
+Thu May 23 11:12:38 MDT 2013 - carnold@suse.com
+
+- bnc#816159 - VUL-0: xen: CVE-2013-1918: XSA-45: Several long
+ latency operations are not preemptible
+ 26946-x86-make-vcpu_destroy_pagetables-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26947-x86-make-new_guest_cr3-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26948-x86-make-MMUEXT_NEW_USER_BASEPTR-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26949-x86-make-vcpu_reset-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26950-x86-make-arch_set_info_guest-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26951-x86-make-page-table-unpinning-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+ 26952-x86-make-page-table-handling-error-paths-preemptible.patch (Replaces CVE-2013-1918-xsa45-*)
+- bnc#816163 - VUL-0: xen: CVE-2013-1952: XSA-49: VT-d interrupt
+ remapping source validation flaw for bridges
+ 26958-VT-d-don-t-permit-SVT_NO_VERIFY-entries-for-known-device-types.patch (Replaces CVE-2013-1952-xsa49.patch)
+- Upstream patches from Jan
+ 26956-x86-mm-preemptible-cleanup.patch
+ 27071-x86-IO-APIC-fix-guest-RTE-write-corner-cases.patch
+ 27072-x86-shadow-fix-off-by-one-in-MMIO-permission-check.patch
+ 27079-fix-XSA-46-regression-with-xend-xm.patch
+ 27083-AMD-iommu-SR56x0-Erratum-64-Reset-all-head-tail-pointers.patch
+
+-------------------------------------------------------------------
+Mon May 20 08:58:46 MDT 2013 - carnold@suse.com
+
+- bnc#820917 - VUL-1: CVE-2013-2076: xen: Information leak on
+ XSAVE/XRSTOR capable AMD CPUs (XSA-52)
+ CVE-2013-2076-xsa52.patch
+- bnc#820919 - VUL-0: CVE-2013-2077: xen: Hypervisor crash due to
+ missing exception recovery on XRSTOR (XSA-53)
+ CVE-2013-2077-xsa53.patch
+- bnc#820920 - VUL-1: CVE-2013-2078: xen: Hypervisor crash due to
+ missing exception recovery on XSETBV (XSA-54)
+ CVE-2013-2078-xsa54.patch
+
+-------------------------------------------------------------------
+Tue May 14 08:32:55 MDT 2013 - carnold@suse.com
+
+- bnc#819416 - VUL-0: xen: CVE-2013-2072: XSA-56: Buffer overflow
+ in xencontrol Python bindings affecting xend
+ CVE-2013-2072-xsa56.patch
+
+-------------------------------------------------------------------
+Tue Apr 30 09:15:26 MDT 2013 - carnold@suse.com
+
+- Additional fix for bnc#816159
+ CVE-2013-1918-xsa45-followup.patch
+
+-------------------------------------------------------------------
+Wed Apr 24 10:30:48 MDT 2013 - carnold@suse.com
+
+- Update to Xen 4.1.5 c/s 23509
+ There were many xen.spec file patches dropped as now being included
+ in the 4.1.5 tarball. For reference, the following recent security
+ patches are also part of the tarball:
+ CVE-2012-6075-xsa41.patch (bnc#797523)
+ CVE-2013-1917-xsa44.patch (bnc#813673)
+ CVE-2013-1919-xsa46.patch (bnc#813675)
+ CVE-2013-1964-xsa50.patch (bnc#816156)
+
+-------------------------------------------------------------------
+Fri Apr 19 14:22:43 MDT 2013 - carnold@suse.com
+
+- bnc#816159 - VUL-0: xen: CVE-2013-1918: XSA-45: Several long
+ latency operations are not preemptible
+ CVE-2013-1918-xsa45-1-vcpu-destroy-pagetables-preemptible.patch
+ CVE-2013-1918-xsa45-2-new-guest-cr3-preemptible.patch
+ CVE-2013-1918-xsa45-3-new-user-base-preemptible.patch
+ CVE-2013-1918-xsa45-4-vcpu-reset-preemptible.patch
+ CVE-2013-1918-xsa45-5-set-info-guest-preemptible.patch
+ CVE-2013-1918-xsa45-6-unpin-preemptible.patch
+ CVE-2013-1918-xsa45-7-mm-error-paths-preemptible.patch
+- bnc#816163 - VUL-0: xen: CVE-2013-1952: XSA-49: VT-d interrupt
+ remapping source validation flaw for bridges
+ CVE-2013-1952-xsa49.patch
+
+-------------------------------------------------------------------
+Thu Apr 18 10:17:08 MDT 2013 - cyliu@suse.com
+
+- bnc#809662 - can't use pv-grub to start domU (pygrub does work)
+ xen.spec
+
+-------------------------------------------------------------------
+Thu Apr 18 07:47:39 MDT 2013 - carnold@suse.com
+
+- bnc#813673 - VUL-0: CVE-2013-1917: xen: Xen PV DoS vulnerability with
+ SYSENTER
+ CVE-2013-1917-xsa44.patch
+- bnc#813675 - VUL-0: CVE-2013-1919: xen: Several access permission
+ issues with IRQs for unprivileged guests
+ CVE-2013-1919-xsa46.patch
+- bnc#813677 - VUL-0: CVE-2013-1920: xen: Potential use of freed
+ memory in event channel operations
+ 26774-defer-event-channel-bucket-pointer-store-until-after-XSM-checks.patch
+- Upstream patches from Jan
+ CVE-2013-1964-xsa50.patch
+ 26702-powernow-add-fixups-for-AMD-P-state-figures.patch
+ 26704-x86-MCA-suppress-bank-clearing-for-certain-injected-events.patch
+ 26731-AMD-IOMMU-Process-softirqs-while-building-dom0-iommu-mappings.patch
+ 26733-VT-d-Enumerate-IOMMUs-when-listing-capabilities.patch
+ 26734-ACPI-ERST-Name-table-in-otherwise-opaque-error-messages.patch
+ 26736-ACPI-APEI-Unlock-apei_iomaps_lock-on-error-path.patch
+ 26737-ACPI-APEI-Add-apei_exec_run_optional.patch
+ 26742-IOMMU-properly-check-whether-interrupt-remapping-is-enabled.patch
+ 26743-VT-d-deal-with-5500-5520-X58-errata.patch
+ 26744-AMD-IOMMU-allow-disabling-only-interrupt-remapping.patch
+ 26749-x86-reserve-pages-when-SandyBridge-integrated-graphics.patch
+ 26765-hvm-Clean-up-vlapic_reg_write-error-propagation.patch
+ 26770-x86-irq_move_cleanup_interrupt-must-ignore-legacy-vectors.patch
+ 26771-x86-S3-Restore-broken-vcpu-affinity-on-resume.patch
++++ 6738 more lines (skipped)
++++ between /dev/null
++++ and /work/SRC/openSUSE:12.2:Update/.xen.1952.new/xen.changes
New:
----
22998-x86-get_page_from_l1e-retcode.patch
22999-x86-mod_l1_entry-retcode.patch
23000-x86-mod_l2_entry-retcode.patch
23050-xentrace_dynamic_tracebuffer_allocation.patch
23074-pfn.h.patch
23091-xentrace_fix_t_info_pages_calculation..patch
23092-xentrace_print_calculated_numbers_in_calculate_tbuf_size.patch
23093-xentrace_remove_gdprintk_usage_since_they_are_not_in_guest_context.patch
23094-xentrace_update_comments.patch
23095-xentrace_use_consistent_printk_prefix.patch
23096-x86-hpet-no-cpumask_lock.patch
23099-x86-rwlock-scalability.patch
23127-vtd-bios-settings.patch
23128-xentrace_correct_formula_to_calculate_t_info_pages.patch
23129-xentrace_remove_unneeded_debug_printk.patch
23173-xentrace_Move_register_cpu_notifier_call_into_boot-time_init..patch
23199-amd-iommu-unmapped-intr-fault.patch
23236-svm-decode-assist-invlpg.patch
23239-xentrace_correct_overflow_check_for_number_of_per-cpu_trace_pages.patch
23246-x86-xsave-enable.patch
23303-cpufreq-misc.patch
23308-xentrace_Move_the_global_variable_t_info_first_offset_into_calculate_tbuf_size.patch
23309-xentrace_Mark_data_size___read_mostly_because_its_only_written_once.patch
23310-xentrace_Remove_unneeded_cast_when_assigning_pointer_value_to_dst.patch
23334-amd-fam12+14-vpmu.patch
23383-libxc-rm-static-vars.patch
23404-xentrace_reduce_trace_buffer_size_to_something_mfn_offset_can_reach.patch
23405-xentrace_fix_type_of_offset_to_avoid_ouf-of-bounds_access.patch
23406-xentrace_update___insert_record_to_copy_the_trace_record_to_individual_mfns.patch
23407-xentrace_allocate_non-contiguous_per-cpu_trace_buffers.patch
23462-libxc-cpu-feature.patch
23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch
23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch
23508-vmx-proc-based-ctls-probe.patch
23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch
23562-xenpaging_remove_unused_spinlock_in_pager.patch
23571-vtd-fault-verbosity.patch
23574-x86-dom0-compressed-ELF.patch
23575-x86-DMI.patch
23576-x86_show_page_walk_also_for_early_page_faults.patch
23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch
23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch
23579-xenpaging_drop_xc.c_remove_ASSERT.patch
23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch
23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch
23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch
23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch
23584-xenpaging_drop_xc.c_remove_xc_files.patch
23585-xenpaging_correct_dropping_of_pages_to_avoid_full_ring_buffer.patch
23586-xenpaging_do_not_bounce_p2mt_back_to_the_hypervisor.patch
23587-xenpaging_remove_srand_call.patch
23588-xenpaging_remove_return_values_from_functions_that_can_not_fail.patch
23589-xenpaging_catch_xc_mem_paging_resume_errors.patch
23590-xenpaging_remove_local_domain_id_variable.patch
23591-xenpaging_move_num_pages_into_xenpaging_struct.patch
23592-xenpaging_start_paging_in_the_middle_of_gfn_range.patch
23593-xenpaging_pass_integer_to_xenpaging_populate_page.patch
23594-xenpaging_add_helper_function_for_unlinking_pagefile.patch
23595-xenpaging_add_watch_thread_to_catch_guest_shutdown.patch
23596-xenpaging_implement_stopping_of_pager_by_sending_SIGTERM-SIGINT.patch
23597-xenpaging_remove_private_mem_event.h.patch
23599-tools_fix_build_after_recent_xenpaging_changes.patch
23613-EFI-headers.patch
23614-x86_64-EFI-boot.patch
23615-x86_64-EFI-runtime.patch
23616-x86_64-EFI-MPS.patch
23643-xentrace_Allow_tracing_to_be_enabled_at_boot.patch
23676-x86_64-image-map-bounds.patch
23697-pygrub-grub2.patch
23719-xentrace_update___trace_var_comment.patch
23723-x86-CMOS-lock.patch
23735-guest-dom0-cap.patch
23749-mmcfg-reservation.patch
23771-x86-ioapic-clear-pin.patch
23772-x86-trampoline.patch
23774-x86_64-EFI-EDD.patch
23782-x86-ioapic-clear-irr.patch
23783-ACPI-set-_PDC-bits.patch
23804-x86-IPI-counts.patch
23817-mem_event_add_ref_counting_for_free_requestslots.patch
23818-mem_event_use_mem_event_mark_and_pause_in_mem_event_check_ring.patch
23827-xenpaging_use_batch_of_pages_during_final_page-in.patch
23841-mem_event_pass_mem_event_domain_pointer_to_mem_event_functions.patch
23842-mem_event_use_different_ringbuffers_for_share_paging_and_access.patch
23874-xenpaging_track_number_of_paged_pages_in_struct_domain.patch
23897-x86-mce-offline-again.patch
23904-xenpaging_use_p2m-get_entry_in_p2m_mem_paging_functions.patch
23905-xenpaging_fix_locking_in_p2m_mem_paging_functions.patch
23906-xenpaging_remove_confusing_comment_from_p2m_mem_paging_populate.patch
23933-pt-bus2bridge-update.patch
23943-xenpaging_clear_page_content_after_evict.patch
23944-pygrub-debug.patch
23949-constify_vcpu_set_affinitys_second_parameter.patch
23953-xenpaging_handle_evict_failures.patch
23957-cpufreq-error-paths.patch
23978-xenpaging_check_p2mt_in_p2m_mem_paging_functions.patch
23979-xenpaging_document_p2m_mem_paging_functions.patch
23980-xenpaging_disallow_paging_in_a_PoD_guest.patch
23993-x86-microcode-amd-fix-23871.patch
23999-pygrub-grub2.patch
24064-pygrub-HybridISO.patch
24104-waitqueue_Double_size_of_x86_shadow_stack..patch
24105-xenpaging_compare_domain_pointer_in_p2m_mem_paging_populate.patch
24106-mem_event_check_capabilities_only_once.patch
24123-x86-cpuidle-quiesce.patch
24124-x86-microcode-amd-quiesce.patch
24138-xenpaging_munmap_all_pages_after_page-in.patch
24153-x86-emul-feature-checks.patch
24171-x86waitqueue_Allocate_whole_page_for_shadow_stack..patch
24178-debug_Add_domain-vcpu_pause_count_info_to_d_key..patch
24195-waitqueue_Detect_saved-stack_overflow_and_crash_the_guest..patch
24196-waitqueue_Reorder_prepare_to_wait_so_that_vcpu_is_definitely_on_the.patch
24197-x86-waitqueue_Because_we_have_per-cpu_stacks_we_must_wake_up_on_teh.patch
24208-xenpaging_remove_filename_from_comment.patch
24209-xenpaging_remove_obsolete_comment_in_resume_path.patch
24210-xenpaging_use_PERROR_to_print_errno.patch
24211-xenpaging_simplify_file_op.patch
24212-xenpaging_print_gfn_in_failure_case.patch
24213-xenpaging_update_xenpaging_init.patch
24214-xenpaging_remove_xc_dominfo_t_from_paging_t.patch
24215-xenpaging_track_the_number_of_paged-out_pages.patch
24216-xenpaging_move_page_add-resume_loops_into_its_own_function..patch
24217-xenpaging_improve_mainloop_exit_handling.patch
24218-libxc_add_bitmap_clear_function.patch
24219-xenpaging_retry_unpageable_gfns.patch
24220-xenpaging_install_into_LIBEXEC_dir.patch
24221-xenpaging_add_XEN_PAGING_DIR_-_libxl_xenpaging_dir_path.patch
24222-xenpaging_use_guests_tot_pages_as_working_target.patch
24223-xenpaging_watch_the_guests_memory-target-tot_pages_xenstore_value.patch
24224-xenpaging_add_cmdline_interface_for_pager.patch
24225-xenpaging_improve_policy_mru_list_handling.patch
24226-xenpaging_add_debug_to_show_received_watch_event..patch
24227-xenpaging_restrict_pagefile_permissions.patch
24231-waitqueue_Implement_wake_up_nroneall..patch
24232-waitqueue_Hold_a_reference_to_a_domain_on_a_waitqueue..patch
24269-mem_event_move_mem_event_domain_out_of_struct_domain.patch
24270-Free_d-mem_event_on_domain_destruction..patch
24272-xenpaging_Fix_c-s_235070a29c8c3ddf7_update_machine_to_phys_mapping_during_page_deallocation.patch
24275-x86-emul-lzcnt.patch
24277-x86-dom0-features.patch
24318-x86-mm_Fix_checks_during_foreign_mapping_of_paged_pages.patch
24327-After_preparing_a_page_for_page-in_allow_immediate_fill-in_of_the_page_contents.patch
24328-Tools_Libxc_wrappers_to_automatically_fill_in_page_oud_page_contents_on_prepare.patch
24329-Teach_xenpaging_to_use_the_new_and_non-racy_xc_mem_paging_load_interface.patch
24359-x86-domU-features.patch
24391-x86-pcpu-version.patch
24401-pygrub-scrolling.patch
24402-pygrub-edit-fix.patch
24459-libxl-vifname.patch
24466-libxc_Only_retry_mapping_pages_when_ENOENT_is_returned.patch
24478-libxl_add_feature_flag_to_xenstore_for_XS_RESET_WATCHES.patch
24566-tools-libxc_fix_error_handling_in_xc_mem_paging_load.patch
24586-x86-mm_Properly_account_for_paged_out_pages.patch
24609-tools-libxc_handle_fallback_in_linux_privcmd_map_foreign_bulk_properly.patch
24610-xenpaging_make_file_op_largefile_aware.patch
24706-pygrub-extlinux.patch
24780-x86-paging-use-clear_guest.patch
24781-x86-vmce-mcg_ctl.patch
24805-x86-MSI-X-dom0-ro.patch
24886-x86-vmce-mcg_ctl-default.patch
24887-x86-vmce-sr.patch
25041-tapdisk2-create-init-name.patch
25247-SVM-no-rdtsc-intercept.patch
25267-x86-text-unlikely.patch
25269-x86-vMCE-addr-misc-write.patch
25430-x86-AMD-Fam15-reenable-topoext.patch
25479-x86-boot-trampoline-remove.patch
25590-hotplug-locking.patch
25595-hotplug-locking.patch
25616-x86-MCi_CTL-default.patch
25734-x86-MCG_CTL-default.patch
25735-x86-cpuid-masking-XeonE5.patch
25833-32on64-bogus-pt_base-adjust.patch
26079-hotplug-locking.patch
26200-IOMMU-debug-verbose.patch
26272-x86-EFI-makefile-cflags-filter.patch
26333-x86-get_page_type-assert.patch
26370-libxc-x86-initial-mapping-fit.patch
26404-x86-forward-both-NMI-kinds.patch
26547-tools-xc_fix_logic_error_in_stdiostream_progress.patch
26548-tools-xc_handle_tty_output_differently_in_stdiostream_progress.patch
26549-tools-xc_turn_XCFLAGS__into_shifts.patch
26550-tools-xc_restore_logging_in_xc_save.patch
26551-tools-xc_log_pid_in_xc_save-xc_restore_output.patch
26675-tools-xentoollog_update_tty_detection_in_stdiostream_progress.patch
26946-x86-make-vcpu_destroy_pagetables-preemptible.patch
26947-x86-make-new_guest_cr3-preemptible.patch
26948-x86-make-MMUEXT_NEW_USER_BASEPTR-preemptible.patch
26949-x86-make-vcpu_reset-preemptible.patch
26950-x86-make-arch_set_info_guest-preemptible.patch
26951-x86-make-page-table-unpinning-preemptible.patch
26952-x86-make-page-table-handling-error-paths-preemptible.patch
26956-x86-mm-preemptible-cleanup.patch
26958-VT-d-don-t-permit-SVT_NO_VERIFY-entries-for-known-device-types.patch
27071-x86-IO-APIC-fix-guest-RTE-write-corner-cases.patch
27072-x86-shadow-fix-off-by-one-in-MMIO-permission-check.patch
27079-fix-XSA-46-regression-with-xend-xm.patch
27085-x86-fix-boot-time-APIC-mode-detection.patch
27093-libxc-limit-cpu-values-when-setting-vcpu-affinity.patch
27112-x86-fix-ordering-of-operations-in-destroy_irq.patch
27117-x86-MCE-disable-if-MCE-banks-are-not-present.patch
27118-x86-xsave-fix-information-leak-on-AMD-CPUs.patch
27119-x86-xsave-recover-from-faults-on-XRSTOR.patch
27120-x86-xsave-properly-check-guest-input-to-XSETBV.patch
27121-x86-preserve-FPU-selectors-for-32-bit-guest-code.patch
27122-x86-fix-XCR0-handling.patch
27123-x86-vtsc-update-vcpu_time-in-hvm_set_guest_time.patch
27127-x86-HVM-fix-x2APIC-APIC_ID-read-emulation.patch
27128-x86-HVM-fix-initialization-of-wallclock-time-for-PVHVM-on-migration.patch
27141-libelf-abolish-libelf-relocate.c.patch
27142-libxc-introduce-xc_dom_seg_to_ptr_pages.patch
27143-libxc-Fix-range-checking-in-xc_dom_pfn_to_ptr-etc.patch
27145-libelf-abolish-elf_sval-and-elf_access_signed.patch
27147-libelf-xc_dom_load_elf_symtab-Do-not-use-syms-uninit.patch
27148-libelf-introduce-macros-for-memory-access-and-pointe.patch
27149-tools-xcutils-readnotes-adjust-print_l1_mfn_valid_no.patch
27150-libelf-check-nul-terminated-strings-properly.patch
27151-libelf-check-all-pointer-accesses.patch
27152-libelf-Check-pointer-references-in-elf_is_elfbinary.patch
27153-libelf-Make-all-callers-call-elf_check_broken.patch
27154-libelf-use-C99-bool-for-booleans.patch
27155-libelf-use-only-unsigned-integers.patch
27156-libelf-check-loops-for-running-away.patch
27157-libelf-abolish-obsolete-macros.patch
27158-libxc-Add-range-checking-to-xc_dom_binloader.patch
27159-libxc-check-failure-of-xc_dom_-_to_ptr-xc_map_foreig.patch
27160-libxc-check-return-values-from-malloc.patch
27161-libxc-range-checks-in-xc_dom_p2m_host-and-_guest.patch
27162-libxc-check-blob-size-before-proceeding-in-xc_dom_ch.patch
27168-x86-hvm-fix-HVMOP_inject_trap-return-value-on-success.patch
27178-libxl-Restrict-permissions-on-PV-console-device-xenstore-nodes.patch
27181-x86-fix-page-refcount-handling-in-page-table-pin-error-path.patch
27197-AMD-intremap-Prevent-use-of-per-device-vector-maps.patch
27204-libxl-suppress-device-assignment-to-HVM-guest-without-IOMMU.patch
27212-x86-don-t-pass-negative-time-to-gtime_to_gtsc-try-2.patch
27213-iommu-amd-Fix-logic-for-clearing-the-IOMMU-interrupt-bits.patch
27214-iommu-amd-Workaround-for-erratum-787.patch
27221-x86-mm-Ensure-useful-progress-in-alloc_l2_table.patch
27248-x86-cpuidle-Change-logging-for-unknown-APIC-IDs.patch
27263-x86-time-Update-wallclock-in-shared-info-when-altering-domain-time-offset.patch
27321-x86-refine-FPU-selector-handling-code-for-XSAVEOPT.patch
32on64-extra-mem.patch
README.SuSE
altgr_2.patch
baselibs.conf
bdrv_default_rwflag.patch
bdrv_open2_fix_flags.patch
bdrv_open2_flags_2.patch
blktap-close-fifos.patch
blktap-disable-debug-printf.patch
blktap-pv-cdrom.patch
blktap.patch
blktapctrl-default-to-ioemu.patch
block-dmmd
block-iscsi
block-nbd
block-npiv
block-npiv-common.sh
block-npiv-vport
boot.local.xenU
boot.xen
bridge-bonding.diff
bridge-opensuse.patch
bridge-record-creation.patch
bridge-vlan.diff
build-tapdisk-ioemu.patch
capslock_enable.patch
cdrom-removable.patch
change-vnc-passwd.patch
change_home_server.patch
check_device_status.patch
checkpoint-rename.patch
del_usb_xend_entry.patch
disable-xl-when-using-xend.patch
disable_emulated_device.diff
dom-print.patch
domUloader.py
domu-usb-controller.patch
etc_pam.d_xen-api
hibernate.patch
hotplug.losetup.patch
hv_extid_compatibility.patch
init.pciback
init.xen_loop
init.xend
init.xendomains
ioemu-7615-qcow2-fix-alloc_cluster_link_l2.patch
ioemu-9868-MSI-X.patch
ioemu-9869-MSI-X-init.patch
ioemu-9873-MSI-X-fix-unregister_iomem.patch
ioemu-9877-MSI-X-device-cleanup.patch
ioemu-bdrv-open-CACHE_WB.patch
ioemu-blktap-barriers.patch
ioemu-blktap-fv-init.patch
ioemu-blktap-image-format.patch
ioemu-blktap-zero-size.patch
ioemu-debuginfo.patch
ioemu-disable-emulated-ide-if-pv.patch
ioemu-disable-scsi.patch
ioemu-vnc-resize.patch
ioemu-watchdog-ib700-timer.patch
ioemu-watchdog-linkage.patch
ioemu-watchdog-support.patch
ipxe-enable-nics.patch
ipxe-gcc45-warnings.patch
ipxe-ipv4-fragment.patch
kernel-boot-hvm.patch
kmp_filelist
libxen_permissive.patch
log-guest-console.patch
logrotate.conf
magic_ioport_compat.patch
minios-fixups.patch
multi-xvdp.patch
network-nat-open-SuSEfirewall2-FORWARD.patch
pvdrv-import-shared-info.patch
pvdrv_emulation_control.patch
qemu-dm-segfault.patch
qemu-security-etch1.diff
serial-split.patch
snapshot-ioemu-delete.patch
snapshot-ioemu-restore.patch
snapshot-ioemu-save.patch
snapshot-without-pv-fix.patch
snapshot-xend.patch
stdvga-cache.patch
stubdom.tar.bz2
supported_module.diff
suspend_evtchn_lock.patch
sysconfig.pciback
tapdisk-ioemu-logfile.patch
tapdisk-ioemu-shutdown-fix.patch
tmp-initscript-modprobe.patch
tmp_build.patch
tools-kboot.diff
tools-watchdog-support.patch
tools-xc_kexec.diff
udev-rules.patch
usb-list.patch
vif-bridge-no-iptables.patch
vif-bridge-tap-fix.patch
vif-bridge.mtu.patch
vif-route-ifup.patch
x86-cpufreq-report.patch
x86-extra-trap-info.patch
x86-ioapic-ack-default.patch
x86-xsa21-emuirq-fix.patch
xen-4.1.5-testing-src.tar.bz2
xen-api-auth.patch
xen-changeset.diff
xen-config.diff
xen-cpupool-xl-config-format.patch
xen-destdir.diff
xen-disable-qemu-monitor.diff
xen-domUloader.diff
xen-fixme-doc.diff
xen-hvm-default-bridge.diff
xen-hvm-default-pae.diff
xen-ioemu-hvm-pv-support.diff
xen-max-free-mem.diff
xen-minimum-restart-time.patch
xen-no-dummy-nfs-ip.diff
xen-paths.diff
xen-qemu-iscsi-fix.patch
xen-rpmoptflags.diff
xen-unstable.misc.linux_privcmd_map_foreign_bulk.retry_paged.patch
xen-updown.sh
xen-utils-0.1.tar.bz2
xen-warnings-unused.diff
xen-warnings.diff
xen-xm-top-needs-root.diff
xen-xmexample-vti.diff
xen-xmexample.diff
xen.changes
xen.migrate.tools-xc_document_printf_calls_in_xc_restore.patch
xen.migrate.tools-xc_print_messages_from_xc_save_with_xc_report.patch
xen.migrate.tools-xc_rework_xc_save.cswitch_qemu_logdirty.patch
xen.migrate.tools_add_xm_migrate_--log_progress_option.patch
xen.migrate.tools_set_migration_constraints_from_cmdline.patch
xen.migrate.tools_set_number_of_dirty_pages_during_migration.patch
xen.no-default-runlevel-4.patch
xen.sles11sp1.fate311487.xen_platform_pci.dmistring.patch
xen.spec
xen_pvdrivers.conf
xenalyze.gcc46.patch
xenalyze.hg.tar.bz2
xenapi-console-protocol.patch
xenapiusers
xencommons-proc-xen.patch
xenconsole-no-multiple-connections.patch
xend-config-enable-dump-comment.patch
xend-config.diff
xend-console-port-restore.patch
xend-core-dump-loc.diff
xend-cpuid.patch
xend-cpuinfo-model-name.patch
xend-devid-or-name.patch
xend-disable-internal-logrotate.patch
xend-domain-lock-sfex.patch
xend-domain-lock.patch
xend-migration-domname-fix.patch
xend-relocation-server.fw
xend-relocation.sh
xend-sysconfig.patch
xend-vcpu-affinity-fix.patch
xenpaging.autostart.patch
xenpaging.doc.patch
xenpaging.error-handling.patch
xenpaging.evict_fail_fast_forward.patch
xenpaging.evict_mmap_readonly.patch
xenpaging.guest-memusage.patch
xenpaging.mem_event-use-wait_queue.patch
xenpaging.mmap-before-nominate.patch
xenpaging.p2m_is_paged.patch
xenpaging.qemu.flush-cache.patch
xenpaging.speedup-page-in.gfn_to_slot.patch
xenpaging.speedup-page-out.evict_pages.free_slot_stack.patch
xenpaging.speedup-page-out.patch
xenpaging.speedup-page-out.policy_choose_victim.patch
xenpaging.speedup-page-out.resume_pages.find_next_bit_set.patch
xenpaging.versioned-interface.patch
xenpaging.waitqueue-paging.patch
xenstored.XS_RESET_WATCHES.patch
xl-create-pv-with-qcow2-img.patch
xm-create-maxmem.patch
xm-create-xflag.patch
xm-save-check-file.patch
xm-test-cleanup.diff
xmclone.sh
xmexample.disks
xmexample.domUloader
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ xen.spec ++++++
++++ 1761 lines (skipped)
++++++ 22998-x86-get_page_from_l1e-retcode.patch ++++++
References: bnc#675363
# HG changeset patch
# User Keir Fraser
# Date 1299687371 0
# Node ID e9fab50d7b61d151d51a4b1088930c9e1ca2da47
# Parent 5f28dcea13555f7ab948c9cb95de3e79e0fbfc4b
x86: make get_page_from_l1e() return a proper error code
... so that the guest can actually know the reason for the (hypercall)
failure.
ptwr_do_page_fault() could propagate the error indicator received from
get_page_from_l1e() back to the guest in the high half of the error
code (entry_vector), provided we're sure all existing guests can deal
with that (or indicate so by means of a to-be-added guest feature
flag). Alternatively, a second virtual status register (like CR2)
could be introduced.
Signed-off-by: Jan Beulich
# HG changeset patch
# User Jan Beulich
# Date 1340271059 -7200
# Node ID baa85434d0ec16629ca30b7c07deaa9beb3ea9c5
# Parent d4cdcf4d541cc4ce72c48df2e26c2b506c5b04bd
x86/mm: fix mod_l1_entry() return value when encountering r/o MMIO page
While putting together the workaround announced in
http://lists.xen.org/archives/html/xen-devel/2012-06/msg00709.html, I
found that mod_l1_entry(), upon encountering a set bit in
mmio_ro_ranges, would return 1 instead of 0 (the removal of the write
permission is supposed to be entirely transparent to the caller, even
more so to the calling guest).
Signed-off-by: Jan Beulich
Acked-by: Keir Fraser
Index: xen-4.1.5-testing/xen/arch/x86/mm/shadow/multi.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/mm/shadow/multi.c
+++ xen-4.1.5-testing/xen/arch/x86/mm/shadow/multi.c
@@ -872,7 +872,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
// If a privileged domain is attempting to install a map of a page it does
// not own, we let it succeed anyway.
//
- if ( unlikely(!res) &&
+ if ( unlikely(res < 0) &&
!shadow_mode_translate(d) &&
mfn_valid(mfn = shadow_l1e_get_mfn(sl1e)) &&
(owner = page_get_owner(mfn_to_page(mfn))) &&
@@ -883,11 +883,11 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
SHADOW_PRINTK("privileged domain %d installs map of mfn %05lx "
"which is owned by domain %d: %s\n",
d->domain_id, mfn_x(mfn), owner->domain_id,
- res ? "success" : "failed");
+ res >= 0 ? "success" : "failed");
}
/* Okay, it might still be a grant mapping PTE. Try it. */
- if ( unlikely(!res) &&
+ if ( unlikely(res < 0) &&
(type == p2m_grant_map_rw ||
(type == p2m_grant_map_ro &&
!(shadow_l1e_get_flags(sl1e) & _PAGE_RW))) )
@@ -900,7 +900,7 @@ shadow_get_page_from_l1e(shadow_l1e_t sl
res = get_page_from_l1e(sl1e, d, page_get_owner(mfn_to_page(mfn)));
}
- if ( unlikely(!res) )
+ if ( unlikely(res < 0) )
{
perfc_incr(shadow_get_page_fail);
SHADOW_PRINTK("failed: l1e=" SH_PRI_pte "\n");
@@ -1229,15 +1229,15 @@ static int shadow_set_l1e(struct vcpu *v
TRACE_SHADOW_PATH_FLAG(TRCE_SFLAG_SHADOW_L1_GET_REF);
switch ( shadow_get_page_from_l1e(new_sl1e, d, new_type) )
{
- case 0:
+ default:
/* Doesn't look like a pagetable. */
flags |= SHADOW_SET_ERROR;
new_sl1e = shadow_l1e_empty();
break;
- case -1:
+ case 1:
shadow_l1e_remove_flags(new_sl1e, _PAGE_RW);
/* fall through */
- default:
+ case 0:
shadow_vram_get_l1e(new_sl1e, sl1e, sl1mfn, d);
break;
}
Index: xen-4.1.5-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/mm.c
@@ -801,12 +801,12 @@ get_page_from_l1e(
bool_t write;
if ( !(l1f & _PAGE_PRESENT) )
- return 1;
+ return 0;
if ( unlikely(l1f & l1_disallow_mask(l1e_owner)) )
{
MEM_LOG("Bad L1 flags %x", l1f & l1_disallow_mask(l1e_owner));
- return 0;
+ return -EINVAL;
}
if ( !mfn_valid(mfn) ||
@@ -823,28 +823,34 @@ get_page_from_l1e(
if ( !iomem_access_permitted(pg_owner, mfn, mfn) )
{
if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */
+ {
MEM_LOG("Non-privileged (%u) attempt to map I/O space %08lx",
pg_owner->domain_id, mfn);
- return 0;
+ return -EPERM;
+ }
+ return -EINVAL;
}
if ( pg_owner != l1e_owner &&
!iomem_access_permitted(l1e_owner, mfn, mfn) )
{
if ( mfn != (PADDR_MASK >> PAGE_SHIFT) ) /* INVALID_MFN? */
+ {
MEM_LOG("Dom%u attempted to map I/O space %08lx in dom%u to dom%u",
curr->domain->domain_id, mfn, pg_owner->domain_id,
l1e_owner->domain_id);
- return 0;
+ return -EPERM;
+ }
+ return -EINVAL;
}
if ( !(l1f & _PAGE_RW) || IS_PRIV(pg_owner) ||
!rangeset_contains_singleton(mmio_ro_ranges, mfn) )
- return 1;
+ return 0;
dprintk(XENLOG_G_WARNING,
"d%d: Forcing read-only access to MFN %lx\n",
l1e_owner->domain_id, mfn);
- return -1;
+ return 1;
}
if ( unlikely(real_pg_owner != pg_owner) )
@@ -875,6 +881,7 @@ get_page_from_l1e(
{
unsigned long x, nx, y = page->count_info;
unsigned long cacheattr = pte_flags_to_cacheattr(l1f);
+ int err;
if ( is_xen_heap_page(page) )
{
@@ -882,7 +889,7 @@ get_page_from_l1e(
put_page_type(page);
put_page(page);
MEM_LOG("Attempt to change cache attributes of Xen heap page");
- return 0;
+ return -EACCES;
}
do {
@@ -890,7 +897,8 @@ get_page_from_l1e(
nx = (x & ~PGC_cacheattr_mask) | (cacheattr << PGC_cacheattr_base);
} while ( (y = cmpxchg(&page->count_info, x, nx)) != x );
- if ( unlikely(update_xen_mappings(mfn, cacheattr) != 0) )
+ err = update_xen_mappings(mfn, cacheattr);
+ if ( unlikely(err) )
{
cacheattr = y & PGC_cacheattr_mask;
do {
@@ -906,11 +914,11 @@ get_page_from_l1e(
" from L1 entry %" PRIpte ") for %d",
mfn, get_gpfn_from_mfn(mfn),
l1e_get_intpte(l1e), l1e_owner->domain_id);
- return 0;
+ return err;
}
}
- return 1;
+ return 0;
could_not_pin:
MEM_LOG("Error getting mfn %lx (pfn %lx) from L1 entry %" PRIpte
@@ -919,7 +927,7 @@ get_page_from_l1e(
l1e_get_intpte(l1e), l1e_owner->domain_id, pg_owner->domain_id);
if ( real_pg_owner != NULL )
put_page(page);
- return 0;
+ return -EBUSY;
}
@@ -1209,17 +1217,20 @@ static int alloc_l1_table(struct page_in
unsigned long pfn = page_to_mfn(page);
l1_pgentry_t *pl1e;
unsigned int i;
+ int ret = 0;
pl1e = map_domain_page(pfn);
for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
{
if ( is_guest_l1_slot(i) )
- switch ( get_page_from_l1e(pl1e[i], d, d) )
+ switch ( ret = get_page_from_l1e(pl1e[i], d, d) )
{
- case 0:
+ default:
goto fail;
- case -1:
+ case 0:
+ break;
+ case 1:
l1e_remove_flags(pl1e[i], _PAGE_RW);
break;
}
@@ -1237,7 +1248,7 @@ static int alloc_l1_table(struct page_in
put_page_from_l1e(pl1e[i], d);
unmap_domain_page(pl1e);
- return -EINVAL;
+ return ret;
}
static int create_pae_xen_mappings(struct domain *d, l3_pgentry_t *pl3e)
@@ -1806,12 +1817,15 @@ static int mod_l1_entry(l1_pgentry_t *pl
return rc;
}
- switch ( get_page_from_l1e(nl1e, pt_dom, pg_dom) )
+ switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
{
- case 0:
+ default:
return 0;
- case -1:
+ case 0:
+ break;
+ case 1:
l1e_remove_flags(nl1e, _PAGE_RW);
+ rc = 0;
break;
}
@@ -4987,7 +5001,7 @@ static int ptwr_emulated_update(
nl1e = l1e_from_intpte(val);
switch ( get_page_from_l1e(nl1e, d, d) )
{
- case 0:
+ default:
if ( is_pv_32bit_domain(d) && (bytes == 4) && (unaligned_addr & 4) &&
!do_cmpxchg && (l1e_get_flags(nl1e) & _PAGE_PRESENT) )
{
@@ -5007,7 +5021,9 @@ static int ptwr_emulated_update(
return X86EMUL_UNHANDLEABLE;
}
break;
- case -1:
+ case 0:
+ break;
+ case 1:
l1e_remove_flags(nl1e, _PAGE_RW);
break;
}
++++++ 22999-x86-mod_l1_entry-retcode.patch ++++++
References: bnc#675363
# HG changeset patch
# User Jan Beulich
# Date 1299687409 0
# Node ID 82b5f8d12903e140f957ae8d13d66e44be076b05
# Parent e9fab50d7b61d151d51a4b1088930c9e1ca2da47
x86: make mod_l1_entry() return a proper error code
... again is so that the guest can actually know the reason for the
(hypercall) failure.
Signed-off-by: Jan Beulich
Index: xen-4.1.5-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/mm.c
@@ -1780,15 +1780,16 @@ static int mod_l1_entry(l1_pgentry_t *pl
struct domain *pt_dom = pt_vcpu->domain;
unsigned long mfn;
p2m_type_t p2mt;
- int rc = 1;
+ int rc = 0;
if ( unlikely(__copy_from_user(&ol1e, pl1e, sizeof(ol1e)) != 0) )
- return 0;
+ return -EFAULT;
if ( unlikely(paging_mode_refcounts(pt_dom)) )
{
- rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu, preserve_ad);
- return rc;
+ if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu, preserve_ad) )
+ return 0;
+ return -EBUSY;
}
if ( l1e_get_flags(nl1e) & _PAGE_PRESENT )
@@ -1797,7 +1798,7 @@ static int mod_l1_entry(l1_pgentry_t *pl
mfn = mfn_x(gfn_to_mfn(p2m_get_hostp2m(pg_dom),
l1e_get_pfn(nl1e), &p2mt));
if ( !p2m_is_ram(p2mt) || unlikely(mfn == INVALID_MFN) )
- return 0;
+ return -EINVAL;
ASSERT((mfn & ~(PADDR_MASK >> PAGE_SHIFT)) == 0);
nl1e = l1e_from_pfn(mfn, l1e_get_flags(nl1e));
@@ -1805,22 +1806,23 @@ static int mod_l1_entry(l1_pgentry_t *pl
{
MEM_LOG("Bad L1 flags %x",
l1e_get_flags(nl1e) & l1_disallow_mask(pt_dom));
- return 0;
+ return -EINVAL;
}
/* Fast path for identical mapping, r/w and presence. */
if ( !l1e_has_changed(ol1e, nl1e, _PAGE_RW | _PAGE_PRESENT) )
{
adjust_guest_l1e(nl1e, pt_dom);
- rc = UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
- preserve_ad);
- return rc;
+ if ( UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
+ preserve_ad) )
+ return 0;
+ return -EBUSY;
}
switch ( rc = get_page_from_l1e(nl1e, pt_dom, pg_dom) )
{
default:
- return 0;
+ return rc;
case 0:
break;
case 1:
@@ -1834,13 +1836,13 @@ static int mod_l1_entry(l1_pgentry_t *pl
preserve_ad)) )
{
ol1e = nl1e;
- rc = 0;
+ rc = -EBUSY;
}
}
else if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, pt_vcpu,
preserve_ad)) )
{
- return 0;
+ return -EBUSY;
}
put_page_from_l1e(ol1e, pt_dom);
@@ -3532,9 +3534,10 @@ long do_mmu_update(
}
#endif
- okay = mod_l1_entry(va, l1e, mfn,
- cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
- pg_owner);
+ rc = mod_l1_entry(va, l1e, mfn,
+ cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
+ pg_owner);
+ okay = !rc;
}
break;
case PGT_l2_page_table:
@@ -4316,7 +4319,7 @@ static int __do_update_va_mapping(
goto out;
}
- rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner) ? 0 : -EINVAL;
+ rc = mod_l1_entry(pl1e, val, gl1mfn, 0, v, pg_owner);
page_unlock(gl1pg);
put_page(gl1pg);
++++++ 23000-x86-mod_l2_entry-retcode.patch ++++++
# HG changeset patch
# User Jan Beulich
# Date 1299687446 0
# Node ID d428fa67abaa0db20b915a697f1d5ba16e554185
# Parent 82b5f8d12903e140f957ae8d13d66e44be076b05
x86: make mod_l2_entry() return a proper error code
... so that finally all mod_lN_entry() functions behave identically,
allowing some cleanup in do_mmu_update() (which no longer needs to
track both an okay status and an error code).
Signed-off-by: Jan Beulich
Index: xen-4.1.5-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/mm.c
@@ -1861,16 +1861,16 @@ static int mod_l2_entry(l2_pgentry_t *pl
struct domain *d = vcpu->domain;
struct page_info *l2pg = mfn_to_page(pfn);
unsigned long type = l2pg->u.inuse.type_info;
- int rc = 1;
+ int rc = 0;
if ( unlikely(!is_guest_l2_slot(d, type, pgentry_ptr_to_slot(pl2e))) )
{
MEM_LOG("Illegal L2 update attempt in Xen-private area %p", pl2e);
- return 0;
+ return -EPERM;
}
if ( unlikely(__copy_from_user(&ol2e, pl2e, sizeof(ol2e)) != 0) )
- return 0;
+ return -EFAULT;
if ( l2e_get_flags(nl2e) & _PAGE_PRESENT )
{
@@ -1878,32 +1878,33 @@ static int mod_l2_entry(l2_pgentry_t *pl
{
MEM_LOG("Bad L2 flags %x",
l2e_get_flags(nl2e) & L2_DISALLOW_MASK);
- return 0;
+ return -EINVAL;
}
/* Fast path for identical mapping and presence. */
if ( !l2e_has_changed(ol2e, nl2e, _PAGE_PRESENT) )
{
adjust_guest_l2e(nl2e, d);
- rc = UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad);
- return rc;
+ if ( UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu, preserve_ad) )
+ return 0;
+ return -EBUSY;
}
- if ( unlikely(get_page_from_l2e(nl2e, pfn, d) < 0) )
- return 0;
+ if ( unlikely((rc = get_page_from_l2e(nl2e, pfn, d)) < 0) )
+ return rc;
adjust_guest_l2e(nl2e, d);
if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu,
preserve_ad)) )
{
ol2e = nl2e;
- rc = 0;
+ rc = -EBUSY;
}
}
else if ( unlikely(!UPDATE_ENTRY(l2, pl2e, ol2e, nl2e, pfn, vcpu,
preserve_ad)) )
{
- return 0;
+ return -EBUSY;
}
put_page_from_l2e(ol2e, pfn);
@@ -3383,7 +3384,7 @@ long do_mmu_update(
void *va;
unsigned long gpfn, gmfn, mfn;
struct page_info *page;
- int rc = 0, okay = 1, i = 0;
+ int rc = 0, i = 0;
unsigned int cmd, done = 0, pt_dom;
struct vcpu *v = current;
struct domain *d = v->domain, *pt_owner = d, *pg_owner;
@@ -3450,7 +3451,6 @@ long do_mmu_update(
}
cmd = req.ptr & (sizeof(l1_pgentry_t)-1);
- okay = 0;
switch ( cmd )
{
@@ -3467,6 +3467,7 @@ long do_mmu_update(
rc = xsm_mmu_normal_update(d, pg_owner, req.val);
if ( rc )
break;
+ rc = -EINVAL;
req.ptr -= cmd;
gmfn = req.ptr >> PAGE_SHIFT;
@@ -3537,7 +3538,6 @@ long do_mmu_update(
rc = mod_l1_entry(va, l1e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v,
pg_owner);
- okay = !rc;
}
break;
case PGT_l2_page_table:
@@ -3561,13 +3561,12 @@ long do_mmu_update(
else if ( p2m_ram_shared == l2e_p2mt )
{
MEM_LOG("Unexpected attempt to map shared page.\n");
- rc = -EINVAL;
break;
}
- okay = mod_l2_entry(va, l2e, mfn,
- cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
+ rc = mod_l2_entry(va, l2e, mfn,
+ cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
}
break;
case PGT_l3_page_table:
@@ -3591,13 +3590,11 @@ long do_mmu_update(
else if ( p2m_ram_shared == l3e_p2mt )
{
MEM_LOG("Unexpected attempt to map shared page.\n");
- rc = -EINVAL;
break;
}
rc = mod_l3_entry(va, l3e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
- okay = !rc;
}
break;
#if CONFIG_PAGING_LEVELS >= 4
@@ -3623,20 +3620,18 @@ long do_mmu_update(
else if ( p2m_ram_shared == l4e_p2mt )
{
MEM_LOG("Unexpected attempt to map shared page.\n");
- rc = -EINVAL;
break;
}
rc = mod_l4_entry(va, l4e, mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, 1, v);
- okay = !rc;
}
break;
#endif
case PGT_writable_page:
perfc_incr(writable_mmu_updates);
- okay = paging_write_guest_entry(
- v, va, req.val, _mfn(mfn));
+ if ( paging_write_guest_entry(v, va, req.val, _mfn(mfn)) )
+ rc = 0;
break;
}
page_unlock(page);
@@ -3646,8 +3641,8 @@ long do_mmu_update(
else if ( get_page_type(page, PGT_writable_page) )
{
perfc_incr(writable_mmu_updates);
- okay = paging_write_guest_entry(
- v, va, req.val, _mfn(mfn));
+ if ( paging_write_guest_entry(v, va, req.val, _mfn(mfn)) )
+ rc = 0;
put_page_type(page);
}
@@ -3668,17 +3663,18 @@ long do_mmu_update(
if ( unlikely(!get_page_from_pagenr(mfn, pg_owner)) )
{
MEM_LOG("Could not get page for mach->phys update");
+ rc = -EINVAL;
break;
}
if ( unlikely(paging_mode_translate(pg_owner)) )
{
MEM_LOG("Mach-phys update on auto-translate guest");
+ rc = -EINVAL;
break;
}
set_gpfn_from_mfn(mfn, gpfn);
- okay = 1;
paging_mark_dirty(pg_owner, mfn);
@@ -3688,15 +3684,11 @@ long do_mmu_update(
default:
MEM_LOG("Invalid page update command %x", cmd);
rc = -ENOSYS;
- okay = 0;
break;
}
- if ( unlikely(!okay) )
- {
- rc = rc ? rc : -EINVAL;
+ if ( unlikely(rc) )
break;
- }
guest_handle_add_offset(ureqs, 1);
}
++++++ 23050-xentrace_dynamic_tracebuffer_allocation.patch ++++++
changeset: 23050:4ebba54b666f
user: Olaf Hering
date: Thu Mar 17 13:29:01 2011 +0000
files: xen/common/trace.c
description:
xentrace: dynamic tracebuffer allocation
Allocate tracebuffers dynamically, based on the requested buffer size.
Calculate t_info_size from requested t_buf size.
Fix allocation failure path, free pages outside the spinlock.
Remove casts for rawbuf, it can be a void pointer since no math is
done.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 249 ++++++++++++++++++++++-------------------------------
1 file changed, 104 insertions(+), 145 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -42,14 +42,14 @@ CHECK_t_buf;
#define compat_t_rec t_rec
#endif
-/* opt_tbuf_size: trace buffer size (in pages) */
-static unsigned int opt_tbuf_size = 0;
+/* opt_tbuf_size: trace buffer size (in pages) for each cpu */
+static unsigned int opt_tbuf_size;
integer_param("tbuf_size", opt_tbuf_size);
/* Pointers to the meta-data objects for all system trace buffers */
static struct t_info *t_info;
-#define T_INFO_PAGES 2 /* Size fixed at 2 pages for now. */
-#define T_INFO_SIZE ((T_INFO_PAGES)*(PAGE_SIZE))
+static unsigned int t_info_pages;
+
static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
@@ -78,6 +78,21 @@ static u32 tb_event_mask = TRC_ALL;
* i.e., sizeof(_type) * ans >= _x. */
#define fit_to_type(_type, _x) (((_x)+sizeof(_type)-1) / sizeof(_type))
+static int cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ if ( action == CPU_UP_PREPARE )
+ spin_lock_init(&per_cpu(t_lock, cpu));
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block cpu_nfb = {
+ .notifier_call = cpu_callback
+};
+
static void calc_tinfo_first_offset(void)
{
int offset_in_bytes = offsetof(struct t_info, mfn_offset[NR_CPUS]);
@@ -85,20 +100,34 @@ static void calc_tinfo_first_offset(void
}
/**
- * check_tbuf_size - check to make sure that the proposed size will fit
+ * calculate_tbuf_size - check to make sure that the proposed size will fit
* in the currently sized struct t_info and allows prod and cons to
* reach double the value without overflow.
+ * Initialize t_info_pages based on number of trace pages.
*/
-static int check_tbuf_size(u32 pages)
+static int calculate_tbuf_size(unsigned int pages)
{
struct t_buf dummy;
typeof(dummy.prod) size;
-
- size = ((typeof(dummy.prod))pages) * PAGE_SIZE;
-
- return (size / PAGE_SIZE != pages)
- || (size + size < size)
- || (num_online_cpus() * pages + t_info_first_offset > T_INFO_SIZE / sizeof(uint32_t));
+
+ /* force maximum value for an unsigned type */
+ size = -1;
+
+ /* max size holds up to n pages */
+ size /= PAGE_SIZE;
+ if ( pages > size )
+ {
+ gdprintk(XENLOG_INFO, "%s: requested number of %u pages reduced to %u\n",
+ __func__, pages, (unsigned int)size);
+ pages = size;
+ }
+
+ t_info_pages = num_online_cpus() * pages + t_info_first_offset;
+ t_info_pages *= sizeof(uint32_t);
+ t_info_pages /= PAGE_SIZE;
+ if ( t_info_pages % PAGE_SIZE )
+ t_info_pages++;
+ return pages;
}
/**
@@ -111,47 +140,28 @@ static int check_tbuf_size(u32 pages)
* This function may also be called later when enabling trace buffers
* via the SET_SIZE hypercall.
*/
-static int alloc_trace_bufs(void)
+static int alloc_trace_bufs(unsigned int pages)
{
- int i, cpu, order;
- unsigned long nr_pages;
+ int i, cpu, order;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list;
int offset;
- if ( opt_tbuf_size == 0 )
- return -EINVAL;
+ if ( t_info )
+ return -EBUSY;
- if ( check_tbuf_size(opt_tbuf_size) )
- {
- printk("Xen trace buffers: tb size %d too large. "
- "Tracing disabled.\n",
- opt_tbuf_size);
+ if ( pages == 0 )
return -EINVAL;
- }
- /* t_info size is fixed for now. Currently this works great, so there
- * seems to be no need to make it dynamic. */
- t_info = alloc_xenheap_pages(get_order_from_pages(T_INFO_PAGES), 0);
- if ( t_info == NULL )
- {
- printk("Xen trace buffers: t_info allocation failed! "
- "Tracing disabled.\n");
- return -ENOMEM;
- }
-
- for ( i = 0; i < T_INFO_PAGES; i++ )
- share_xen_page_with_privileged_guests(
- virt_to_page(t_info) + i, XENSHARE_readonly);
-
- t_info_mfn_list = (uint32_t *)t_info;
- offset = t_info_first_offset;
+ /* Calculate offset in u32 of first mfn */
+ calc_tinfo_first_offset();
- t_info->tbuf_size = opt_tbuf_size;
- printk(XENLOG_INFO "tbuf_size %d\n", t_info->tbuf_size);
+ pages = calculate_tbuf_size(pages);
+ order = get_order_from_pages(pages);
- nr_pages = opt_tbuf_size;
- order = get_order_from_pages(nr_pages);
+ t_info = alloc_xenheap_pages(get_order_from_pages(t_info_pages), 0);
+ if ( t_info == NULL )
+ goto out_dealloc;
/*
* First, allocate buffers for all of the cpus. If any
@@ -159,27 +169,29 @@ static int alloc_trace_bufs(void)
*/
for_each_online_cpu(cpu)
{
- int flags;
- char *rawbuf;
+ void *rawbuf;
struct t_buf *buf;
if ( (rawbuf = alloc_xenheap_pages(
order, MEMF_bits(32 + PAGE_SHIFT))) == NULL )
{
- printk("Xen trace buffers: memory allocation failed\n");
- opt_tbuf_size = 0;
+ printk("Xen trace buffers: memory allocation failed on cpu %d\n", cpu);
goto out_dealloc;
}
- spin_lock_irqsave(&per_cpu(t_lock, cpu), flags);
-
- per_cpu(t_bufs, cpu) = buf = (struct t_buf *)rawbuf;
+ per_cpu(t_bufs, cpu) = buf = rawbuf;
buf->cons = buf->prod = 0;
per_cpu(t_data, cpu) = (unsigned char *)(buf + 1);
+ }
- spin_unlock_irqrestore(&per_cpu(t_lock, cpu), flags);
+ offset = t_info_first_offset;
+ t_info_mfn_list = (uint32_t *)t_info;
- }
+ for(i = 0; i < t_info_pages; i++)
+ share_xen_page_with_privileged_guests(
+ virt_to_page(t_info) + i, XENSHARE_readonly);
+
+ t_info->tbuf_size = pages;
/*
* Now share the pages to xentrace can map them, and write them in
@@ -188,89 +200,75 @@ static int alloc_trace_bufs(void)
for_each_online_cpu(cpu)
{
/* Share pages so that xentrace can map them. */
- char *rawbuf;
+ void *rawbuf = per_cpu(t_bufs, cpu);
+ struct page_info *p = virt_to_page(rawbuf);
+ uint32_t mfn = virt_to_mfn(rawbuf);
- if ( (rawbuf = (char *)per_cpu(t_bufs, cpu)) )
+ for ( i = 0; i < pages; i++ )
{
- struct page_info *p = virt_to_page(rawbuf);
- uint32_t mfn = virt_to_mfn(rawbuf);
+ share_xen_page_with_privileged_guests(p + i, XENSHARE_writable);
- for ( i = 0; i < nr_pages; i++ )
- {
- share_xen_page_with_privileged_guests(
- p + i, XENSHARE_writable);
-
- t_info_mfn_list[offset + i]=mfn + i;
- }
- /* Write list first, then write per-cpu offset. */
- wmb();
- t_info->mfn_offset[cpu]=offset;
- printk(XENLOG_INFO "p%d mfn %"PRIx32" offset %d\n",
- cpu, mfn, offset);
- offset+=i;
+ t_info_mfn_list[offset + i]=mfn + i;
}
+ t_info->mfn_offset[cpu]=offset;
+ printk(XENLOG_INFO "p%d mfn %"PRIx32" offset %d\n",
+ cpu, mfn, offset);
+ offset+=i;
+
+ spin_lock_init(&per_cpu(t_lock, cpu));
}
- data_size = (opt_tbuf_size * PAGE_SIZE - sizeof(struct t_buf));
+ data_size = (pages * PAGE_SIZE - sizeof(struct t_buf));
t_buf_highwater = data_size >> 1; /* 50% high water */
+ opt_tbuf_size = pages;
+
+ register_cpu_notifier(&cpu_nfb);
+
+ printk("Xen trace buffers: initialised\n");
+ wmb(); /* above must be visible before tb_init_done flag set */
+ tb_init_done = 1;
return 0;
+
out_dealloc:
for_each_online_cpu(cpu)
{
- int flags;
- char * rawbuf;
-
- spin_lock_irqsave(&per_cpu(t_lock, cpu), flags);
- if ( (rawbuf = (char *)per_cpu(t_bufs, cpu)) )
+ void *rawbuf = per_cpu(t_bufs, cpu);
+ per_cpu(t_bufs, cpu) = NULL;
+ printk("Xen trace buffers: cpu %d p %p\n", cpu, rawbuf);
+ if ( rawbuf )
{
- per_cpu(t_bufs, cpu) = NULL;
ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
free_xenheap_pages(rawbuf, order);
}
- spin_unlock_irqrestore(&per_cpu(t_lock, cpu), flags);
}
-
+ free_xenheap_pages(t_info, get_order_from_pages(t_info_pages));
+ t_info = NULL;
+ printk("Xen trace buffers: allocation failed! Tracing disabled.\n");
return -ENOMEM;
}
/**
- * tb_set_size - handle the logic involved with dynamically
- * allocating and deallocating tbufs
+ * tb_set_size - handle the logic involved with dynamically allocating tbufs
*
* This function is called when the SET_SIZE hypercall is done.
*/
-static int tb_set_size(int size)
+static int tb_set_size(unsigned int pages)
{
/*
* Setting size is a one-shot operation. It can be done either at
* boot time or via control tools, but not by both. Once buffers
* are created they cannot be destroyed.
*/
- int ret = 0;
-
- if ( opt_tbuf_size != 0 )
+ if ( opt_tbuf_size && pages != opt_tbuf_size )
{
- if ( size != opt_tbuf_size )
- gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
- opt_tbuf_size, size);
+ gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
+ opt_tbuf_size, pages);
return -EINVAL;
}
- if ( size <= 0 )
- return -EINVAL;
-
- opt_tbuf_size = size;
-
- if ( (ret = alloc_trace_bufs()) != 0 )
- {
- opt_tbuf_size = 0;
- return ret;
- }
-
- printk("Xen trace buffers: initialized\n");
- return 0;
+ return alloc_trace_bufs(pages);
}
int trace_will_trace_event(u32 event)
@@ -299,21 +297,6 @@ int trace_will_trace_event(u32 event)
return 1;
}
-static int cpu_callback(
- struct notifier_block *nfb, unsigned long action, void *hcpu)
-{
- unsigned int cpu = (unsigned long)hcpu;
-
- if ( action == CPU_UP_PREPARE )
- spin_lock_init(&per_cpu(t_lock, cpu));
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block cpu_nfb = {
- .notifier_call = cpu_callback
-};
-
/**
* init_trace_bufs - performs initialization of the per-cpu trace buffers.
*
@@ -323,37 +306,13 @@ static struct notifier_block cpu_nfb = {
*/
void __init init_trace_bufs(void)
{
- int i;
-
- /* Calculate offset in u32 of first mfn */
- calc_tinfo_first_offset();
-
- /* Per-cpu t_lock initialisation. */
- for_each_online_cpu ( i )
- spin_lock_init(&per_cpu(t_lock, i));
- register_cpu_notifier(&cpu_nfb);
-
- if ( opt_tbuf_size == 0 )
- {
- printk("Xen trace buffers: disabled\n");
- goto fail;
- }
-
- if ( alloc_trace_bufs() != 0 )
+ if ( opt_tbuf_size && alloc_trace_bufs(opt_tbuf_size) )
{
- dprintk(XENLOG_INFO, "Xen trace buffers: "
- "allocation size %d failed, disabling\n",
- opt_tbuf_size);
- goto fail;
+ gdprintk(XENLOG_INFO, "Xen trace buffers: "
+ "allocation size %d failed, disabling\n",
+ opt_tbuf_size);
+ opt_tbuf_size = 0;
}
-
- printk("Xen trace buffers: initialised\n");
- wmb(); /* above must be visible before tb_init_done flag set */
- tb_init_done = 1;
- return;
-
- fail:
- opt_tbuf_size = 0;
}
/**
@@ -372,7 +331,7 @@ int tb_control(xen_sysctl_tbuf_op_t *tbc
case XEN_SYSCTL_TBUFOP_get_info:
tbc->evt_mask = tb_event_mask;
tbc->buffer_mfn = t_info ? virt_to_mfn(t_info) : 0;
- tbc->size = T_INFO_PAGES * PAGE_SIZE;
+ tbc->size = t_info_pages * PAGE_SIZE;
break;
case XEN_SYSCTL_TBUFOP_set_cpu_mask:
rc = xenctl_cpumap_to_cpumask(&tb_cpu_mask, &tbc->cpu_mask);
++++++ 23074-pfn.h.patch ++++++
References: fate#311376, fate#311529, bnc#578927, bnc#628554
# HG changeset patch
# User Keir Fraser
# Date 1300887295 0
# Node ID c80e0fb4fe932b4d8379ea5739af93ae22a30ea5
# Parent 3831bd253e02aa0536ed32e936777d026abb955e
Define new header for PFN_{DOWN,UP} macros.
Signed-off-by: Keir Fraser
Index: xen-4.1.4-testing/xen/arch/x86/domain_build.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/domain_build.c
+++ xen-4.1.4-testing/xen/arch/x86/domain_build.c
@@ -21,6 +21,7 @@
#include
#include
#include
+#include
#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/e820.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/e820.c
+++ xen-4.1.4-testing/xen/arch/x86/e820.c
@@ -4,6 +4,7 @@
#include
#include
#include
+#include
#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.4-testing/xen/arch/x86/mm.c
@@ -100,6 +100,7 @@
#include
#include
#include
+#include
#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/msi.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/msi.c
+++ xen-4.1.4-testing/xen/arch/x86/msi.c
@@ -18,6 +18,7 @@
#include
#include
#include
+#include
#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/numa.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/numa.c
+++ xen-4.1.4-testing/xen/arch/x86/numa.c
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/setup.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/setup.c
+++ xen-4.1.4-testing/xen/arch/x86/setup.c
@@ -22,6 +22,7 @@
#include
#include
#include
+#include
#include
#include
#ifdef CONFIG_COMPAT
Index: xen-4.1.4-testing/xen/arch/x86/srat.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/srat.c
+++ xen-4.1.4-testing/xen/arch/x86/srat.c
@@ -17,6 +17,7 @@
#include
#include
#include
+#include
#include
#include
Index: xen-4.1.4-testing/xen/arch/x86/tboot.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/tboot.c
+++ xen-4.1.4-testing/xen/arch/x86/tboot.c
@@ -6,6 +6,7 @@
#include
#include
#include
+#include
#include
#include
#include
Index: xen-4.1.4-testing/xen/include/asm-x86/page.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/asm-x86/page.h
+++ xen-4.1.4-testing/xen/include/asm-x86/page.h
@@ -396,8 +396,6 @@ static inline uint32_t cacheattr_to_pte_
#endif /* !__ASSEMBLY__ */
-#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
-#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)
#endif /* __X86_PAGE_H__ */
Index: xen-4.1.4-testing/xen/include/xen/pci.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/xen/pci.h
+++ xen-4.1.4-testing/xen/include/xen/pci.h
@@ -13,6 +13,7 @@
#include
#include
#include
+#include
/*
* The PCI interface treats multi-function devices as independent
Index: xen-4.1.4-testing/xen/include/xen/pfn.h
===================================================================
--- /dev/null
+++ xen-4.1.4-testing/xen/include/xen/pfn.h
@@ -0,0 +1,9 @@
+#ifndef __XEN_PFN_H__
+#define __XEN_PFN_H__
+
+#include
+
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+
+#endif /* __XEN_PFN_H__ */
++++++ 23091-xentrace_fix_t_info_pages_calculation..patch ++++++
changeset: 23091:67632e5cf652
user: Olaf Hering
date: Fri Mar 25 08:56:33 2011 +0000
files: xen/common/trace.c
description:
xentrace: fix t_info_pages calculation.
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -29,6 +29,7 @@
#include
#include
#include
+#include
#include
#include
#include
@@ -109,6 +110,7 @@ static int calculate_tbuf_size(unsigned
{
struct t_buf dummy;
typeof(dummy.prod) size;
+ unsigned int t_info_words, t_info_bytes;
/* force maximum value for an unsigned type */
size = -1;
@@ -122,11 +124,9 @@ static int calculate_tbuf_size(unsigned
pages = size;
}
- t_info_pages = num_online_cpus() * pages + t_info_first_offset;
- t_info_pages *= sizeof(uint32_t);
- t_info_pages /= PAGE_SIZE;
- if ( t_info_pages % PAGE_SIZE )
- t_info_pages++;
+ t_info_words = num_online_cpus() * pages + t_info_first_offset;
+ t_info_bytes = t_info_words * sizeof(uint32_t);
+ t_info_pages = PFN_UP(t_info_bytes);
return pages;
}
++++++ 23092-xentrace_print_calculated_numbers_in_calculate_tbuf_size.patch ++++++
changeset: 23092:45dafa422812
user: Olaf Hering
date: Fri Mar 25 08:57:28 2011 +0000
files: xen/common/trace.c
description:
xentrace: print calculated numbers in calculate_tbuf_size()
Print number of pages to allocate for per-cpu tracebuffer and metadata
to ease debugging when allocation fails.
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 2 ++
1 file changed, 2 insertions(+)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -127,6 +127,8 @@ static int calculate_tbuf_size(unsigned
t_info_words = num_online_cpus() * pages + t_info_first_offset;
t_info_bytes = t_info_words * sizeof(uint32_t);
t_info_pages = PFN_UP(t_info_bytes);
+ printk(XENLOG_INFO "xentrace: requesting %u t_info pages for %u trace pages on %u cpus\n",
+ t_info_pages, pages, num_online_cpus());
return pages;
}
++++++ 23093-xentrace_remove_gdprintk_usage_since_they_are_not_in_guest_context.patch ++++++
changeset: 23093:4b784605b089
user: Olaf Hering
date: Fri Mar 25 08:57:47 2011 +0000
files: xen/common/trace.c
description:
xentrace: remove gdprintk usage since they are not in guest context
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -119,7 +119,7 @@ static int calculate_tbuf_size(unsigned
size /= PAGE_SIZE;
if ( pages > size )
{
- gdprintk(XENLOG_INFO, "%s: requested number of %u pages reduced to %u\n",
+ printk(XENLOG_INFO "%s: requested number of %u pages reduced to %u\n",
__func__, pages, (unsigned int)size);
pages = size;
}
@@ -265,7 +265,7 @@ static int tb_set_size(unsigned int page
*/
if ( opt_tbuf_size && pages != opt_tbuf_size )
{
- gdprintk(XENLOG_INFO, "tb_set_size from %d to %d not implemented\n",
+ printk(XENLOG_INFO "tb_set_size from %d to %d not implemented\n",
opt_tbuf_size, pages);
return -EINVAL;
}
@@ -310,7 +310,7 @@ void __init init_trace_bufs(void)
{
if ( opt_tbuf_size && alloc_trace_bufs(opt_tbuf_size) )
{
- gdprintk(XENLOG_INFO, "Xen trace buffers: "
+ printk(XENLOG_INFO "Xen trace buffers: "
"allocation size %d failed, disabling\n",
opt_tbuf_size);
opt_tbuf_size = 0;
++++++ 23094-xentrace_update_comments.patch ++++++
changeset: 23094:d09e8885bc82
user: Olaf Hering
date: Fri Mar 25 08:58:04 2011 +0000
files: xen/common/trace.c
description:
xentrace: update comments
Fix a typo, remove redundant comment.
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -196,12 +196,11 @@ static int alloc_trace_bufs(unsigned int
t_info->tbuf_size = pages;
/*
- * Now share the pages to xentrace can map them, and write them in
+ * Now share the pages so xentrace can map them, and write them in
* the global t_info structure.
*/
for_each_online_cpu(cpu)
{
- /* Share pages so that xentrace can map them. */
void *rawbuf = per_cpu(t_bufs, cpu);
struct page_info *p = virt_to_page(rawbuf);
uint32_t mfn = virt_to_mfn(rawbuf);
++++++ 23095-xentrace_use_consistent_printk_prefix.patch ++++++
changeset: 23095:941119d58655
user: Olaf Hering
date: Fri Mar 25 09:01:37 2011 +0000
files: xen/common/trace.c
description:
xentrace: use consistent printk prefix
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 31 +++++++++++++++++--------------
1 file changed, 17 insertions(+), 14 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -119,16 +119,18 @@ static int calculate_tbuf_size(unsigned
size /= PAGE_SIZE;
if ( pages > size )
{
- printk(XENLOG_INFO "%s: requested number of %u pages reduced to %u\n",
- __func__, pages, (unsigned int)size);
+ printk(XENLOG_INFO "xentrace: requested number of %u pages "
+ "reduced to %u\n",
+ pages, (unsigned int)size);
pages = size;
}
t_info_words = num_online_cpus() * pages + t_info_first_offset;
t_info_bytes = t_info_words * sizeof(uint32_t);
t_info_pages = PFN_UP(t_info_bytes);
- printk(XENLOG_INFO "xentrace: requesting %u t_info pages for %u trace pages on %u cpus\n",
- t_info_pages, pages, num_online_cpus());
+ printk(XENLOG_INFO "xentrace: requesting %u t_info pages "
+ "for %u trace pages on %u cpus\n",
+ t_info_pages, pages, num_online_cpus());
return pages;
}
@@ -177,7 +179,8 @@ static int alloc_trace_bufs(unsigned int
if ( (rawbuf = alloc_xenheap_pages(
order, MEMF_bits(32 + PAGE_SHIFT))) == NULL )
{
- printk("Xen trace buffers: memory allocation failed on cpu %d\n", cpu);
+ printk(XENLOG_INFO "xentrace: memory allocation failed "
+ "on cpu %d\n", cpu);
goto out_dealloc;
}
@@ -212,7 +215,7 @@ static int alloc_trace_bufs(unsigned int
t_info_mfn_list[offset + i]=mfn + i;
}
t_info->mfn_offset[cpu]=offset;
- printk(XENLOG_INFO "p%d mfn %"PRIx32" offset %d\n",
+ printk(XENLOG_INFO "xentrace: p%d mfn %"PRIx32" offset %d\n",
cpu, mfn, offset);
offset+=i;
@@ -225,7 +228,7 @@ static int alloc_trace_bufs(unsigned int
register_cpu_notifier(&cpu_nfb);
- printk("Xen trace buffers: initialised\n");
+ printk("xentrace: initialised\n");
wmb(); /* above must be visible before tb_init_done flag set */
tb_init_done = 1;
@@ -236,7 +239,7 @@ out_dealloc:
{
void *rawbuf = per_cpu(t_bufs, cpu);
per_cpu(t_bufs, cpu) = NULL;
- printk("Xen trace buffers: cpu %d p %p\n", cpu, rawbuf);
+ printk(XENLOG_DEBUG "xentrace: cpu %d p %p\n", cpu, rawbuf);
if ( rawbuf )
{
ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
@@ -245,7 +248,7 @@ out_dealloc:
}
free_xenheap_pages(t_info, get_order_from_pages(t_info_pages));
t_info = NULL;
- printk("Xen trace buffers: allocation failed! Tracing disabled.\n");
+ printk(XENLOG_WARNING "xentrace: allocation failed! Tracing disabled.\n");
return -ENOMEM;
}
@@ -264,8 +267,9 @@ static int tb_set_size(unsigned int page
*/
if ( opt_tbuf_size && pages != opt_tbuf_size )
{
- printk(XENLOG_INFO "tb_set_size from %d to %d not implemented\n",
- opt_tbuf_size, pages);
+ printk(XENLOG_INFO "xentrace: tb_set_size from %d to %d "
+ "not implemented\n",
+ opt_tbuf_size, pages);
return -EINVAL;
}
@@ -309,9 +313,8 @@ void __init init_trace_bufs(void)
{
if ( opt_tbuf_size && alloc_trace_bufs(opt_tbuf_size) )
{
- printk(XENLOG_INFO "Xen trace buffers: "
- "allocation size %d failed, disabling\n",
- opt_tbuf_size);
+ printk(XENLOG_INFO "xentrace: allocation size %d failed, disabling\n",
+ opt_tbuf_size);
opt_tbuf_size = 0;
}
}
++++++ 23096-x86-hpet-no-cpumask_lock.patch ++++++
# HG changeset patch
# User Jan Beulich
# Date 1301043797 0
# Node ID a65612bcbb921e98a8843157bf365e4ab16e8144
# Parent 941119d58655f2b2df86d9ecc4cb502bbc5e783c
x86/hpet: eliminate cpumask_lock
According to the (now getting removed) comment in struct
hpet_event_channel, this was to prevent accessing a CPU's
timer_deadline after it got cleared from cpumask. This can be done
without a lock altogether - hpet_broadcast_exit() can simply clear
the bit, and handle_hpet_broadcast() can read timer_deadline before
looking at the mask a second time (the cpumask bit was already
found set by the surrounding loop).
Signed-off-by: Jan Beulich
Acked-by: Gang Wei
Index: xen-4.1.4-testing/xen/arch/x86/hpet.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/hpet.c
+++ xen-4.1.4-testing/xen/arch/x86/hpet.c
@@ -34,18 +34,6 @@ struct hpet_event_channel
int shift;
s_time_t next_event;
cpumask_t cpumask;
- /*
- * cpumask_lock is used to prevent hpet intr handler from accessing other
- * cpu's timer_deadline after the other cpu's mask was cleared --
- * mask cleared means cpu waken up, then accessing timer_deadline from
- * other cpu is not safe.
- * It is not used for protecting cpumask, so set ops needn't take it.
- * Multiple cpus clear cpumask simultaneously is ok due to the atomic
- * feature of cpu_clear, so hpet_broadcast_exit() can take read lock for
- * clearing cpumask, and handle_hpet_broadcast() have to take write lock
- * for read cpumask & access timer_deadline.
- */
- rwlock_t cpumask_lock;
spinlock_t lock;
void (*event_handler)(struct hpet_event_channel *);
@@ -208,17 +196,18 @@ again:
/* find all expired events */
for_each_cpu_mask(cpu, ch->cpumask)
{
- write_lock_irq(&ch->cpumask_lock);
+ s_time_t deadline;
- if ( cpu_isset(cpu, ch->cpumask) )
- {
- if ( per_cpu(timer_deadline, cpu) <= now )
- cpu_set(cpu, mask);
- else if ( per_cpu(timer_deadline, cpu) < next_event )
- next_event = per_cpu(timer_deadline, cpu);
- }
+ rmb();
+ deadline = per_cpu(timer_deadline, cpu);
+ rmb();
+ if ( !cpu_isset(cpu, ch->cpumask) )
+ continue;
- write_unlock_irq(&ch->cpumask_lock);
+ if ( deadline <= now )
+ cpu_set(cpu, mask);
+ else if ( deadline < next_event )
+ next_event = deadline;
}
/* wakeup the cpus which have an expired event. */
@@ -604,7 +593,6 @@ void hpet_broadcast_init(void)
hpet_events[i].shift = 32;
hpet_events[i].next_event = STIME_MAX;
spin_lock_init(&hpet_events[i].lock);
- rwlock_init(&hpet_events[i].cpumask_lock);
wmb();
hpet_events[i].event_handler = handle_hpet_broadcast;
}
@@ -640,7 +628,6 @@ void hpet_broadcast_init(void)
legacy_hpet_event.idx = 0;
legacy_hpet_event.flags = 0;
spin_lock_init(&legacy_hpet_event.lock);
- rwlock_init(&legacy_hpet_event.cpumask_lock);
wmb();
legacy_hpet_event.event_handler = handle_hpet_broadcast;
@@ -722,9 +709,7 @@ void hpet_broadcast_exit(void)
if ( !reprogram_timer(this_cpu(timer_deadline)) )
raise_softirq(TIMER_SOFTIRQ);
- read_lock_irq(&ch->cpumask_lock);
cpu_clear(cpu, ch->cpumask);
- read_unlock_irq(&ch->cpumask_lock);
if ( ch != &legacy_hpet_event )
{
++++++ 23099-x86-rwlock-scalability.patch ++++++
# HG changeset patch
# User Keir Fraser
# Date 1301126601 0
# Node ID 612171ff82ea51aaf65d98fd1a551eb8d50fb481
# Parent c9f745c153ec8c3775e2ee03adc3cb30370b84f6
rwlock: Allow to scale to 2^31-1 readers on x86.
Also rework to match the 'trylock' style of raw function used for
spinlocks.
Inspired by Jan Beulich's patch to do similar improved scaling.
Signed-off-by: Keir Fraser
# HG changeset patch
# User Keir Fraser
# Date 1301214635 -3600
# Node ID 0bc1c4746c8939337f693a513fd837fc03477db1
# Parent 48dac730a93b27ff60a340564e9a7afd7f9385f4
x86_32: Fix _raw_read_trylock() build on some gcc versions.
Was broken by 23099:612171ff82ea.
A bool_t is a single byte, and needs a 'q' register constraint. Avoid
the whole issue by changing the variable to an int, and explicitly
specify the operand suffix as 'l' for good measure.
Signed-off-by: Keir Fraser
Index: xen-4.1.2-testing/xen/common/spinlock.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/spinlock.c
+++ xen-4.1.2-testing/xen/common/spinlock.c
@@ -234,7 +234,11 @@ void _spin_unlock_recursive(spinlock_t *
void _read_lock(rwlock_t *lock)
{
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ }
preempt_disable();
}
@@ -243,7 +247,13 @@ void _read_lock_irq(rwlock_t *lock)
ASSERT(local_irq_is_enabled());
local_irq_disable();
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ local_irq_enable();
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_disable();
+ }
preempt_disable();
}
@@ -252,11 +262,26 @@ unsigned long _read_lock_irqsave(rwlock_
unsigned long flags;
local_irq_save(flags);
check_lock(&lock->debug);
- _raw_read_lock(&lock->raw);
+ while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+ {
+ local_irq_restore(flags);
+ while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_save(flags);
+ }
preempt_disable();
return flags;
}
+int _read_trylock(rwlock_t *lock)
+{
+ check_lock(&lock->debug);
+ if ( !_raw_read_trylock(&lock->raw) )
+ return 0;
+ preempt_disable();
+ return 1;
+}
+
void _read_unlock(rwlock_t *lock)
{
preempt_enable();
@@ -280,7 +305,11 @@ void _read_unlock_irqrestore(rwlock_t *l
void _write_lock(rwlock_t *lock)
{
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ }
preempt_disable();
}
@@ -289,7 +318,13 @@ void _write_lock_irq(rwlock_t *lock)
ASSERT(local_irq_is_enabled());
local_irq_disable();
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ local_irq_enable();
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_disable();
+ }
preempt_disable();
}
@@ -298,7 +333,13 @@ unsigned long _write_lock_irqsave(rwlock
unsigned long flags;
local_irq_save(flags);
check_lock(&lock->debug);
- _raw_write_lock(&lock->raw);
+ while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+ {
+ local_irq_restore(flags);
+ while ( likely(_raw_rw_is_locked(&lock->raw)) )
+ cpu_relax();
+ local_irq_save(flags);
+ }
preempt_disable();
return flags;
}
Index: xen-4.1.2-testing/xen/include/asm-ia64/linux-xen/asm/spinlock.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-ia64/linux-xen/asm/spinlock.h
+++ xen-4.1.2-testing/xen/include/asm-ia64/linux-xen/asm/spinlock.h
@@ -35,17 +35,6 @@ typedef struct {
} raw_rwlock_t;
#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0, 0 }
-#define _raw_read_lock(rw) \
-do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
- \
- while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
- ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
- while (*(volatile int *)__read_lock_ptr < 0) \
- cpu_relax(); \
- } \
-} while (0)
-
#define _raw_read_unlock(rw) \
do { \
raw_rwlock_t *__read_lock_ptr = (rw); \
@@ -53,20 +42,6 @@ do { \
} while (0)
#ifdef ASM_SUPPORTED
-#define _raw_write_lock(rw) \
-do { \
- __asm__ __volatile__ ( \
- "mov ar.ccv = r0\n" \
- "dep r29 = -1, r0, 31, 1;;\n" \
- "1:\n" \
- "ld4 r2 = [%0];;\n" \
- "cmp4.eq p0,p7 = r0,r2\n" \
- "(p7) br.cond.spnt.few 1b \n" \
- "cmpxchg4.acq r2 = [%0], r29, ar.ccv;;\n" \
- "cmp4.eq p0,p7 = r0, r2\n" \
- "(p7) br.cond.spnt.few 1b;;\n" \
- :: "r"(rw) : "ar.ccv", "p7", "r2", "r29", "memory"); \
-} while(0)
#define _raw_write_trylock(rw) \
({ \
@@ -82,16 +57,6 @@ do { \
#else /* !ASM_SUPPORTED */
-#define _raw_write_lock(l) \
-({ \
- __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
- __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
- do { \
- while (*ia64_write_lock_ptr) \
- ia64_barrier(); \
- ia64_val = ia64_cmpxchg4_acq(ia64_write_lock_ptr, ia64_set_val, 0); \
- } while (ia64_val); \
-})
#define _raw_write_trylock(rw) \
({ \
Index: xen-4.1.2-testing/xen/include/asm-x86/spinlock.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/spinlock.h
+++ xen-4.1.2-testing/xen/include/asm-x86/spinlock.h
@@ -35,51 +35,29 @@ typedef struct {
volatile int lock;
} raw_rwlock_t;
-#define RW_LOCK_BIAS 0x01000000
-#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { RW_LOCK_BIAS }
+#define RW_WRITE_BIAS 0x7fffffff
+#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0 }
-static always_inline void _raw_read_lock(raw_rwlock_t *rw)
+static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
{
- asm volatile (
- "1: lock; decl %0 \n"
- " jns 3f \n"
- " lock; incl %0 \n"
- "2: rep; nop \n"
- " cmpl $1,%0 \n"
- " js 2b \n"
- " jmp 1b \n"
- "3:"
- : "=m" (rw->lock) : : "memory" );
-}
+ int acquired;
-static always_inline void _raw_write_lock(raw_rwlock_t *rw)
-{
asm volatile (
- "1: lock; subl %1,%0 \n"
- " jz 3f \n"
- " lock; addl %1,%0 \n"
- "2: rep; nop \n"
- " cmpl %1,%0 \n"
- " jne 2b \n"
+ " lock; decl %0 \n"
+ " jns 2f \n"
+ "1: .subsection 1 \n"
+ "2: lock; incl %0 \n"
+ " decl %1 \n"
" jmp 1b \n"
- "3:"
- : "=m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory" );
+ " .subsection 0 \n"
+ : "=m" (rw->lock), "=r" (acquired) : "1" (1) : "memory" );
+
+ return acquired;
}
static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
{
- int rc;
-
- asm volatile (
- " lock; subl %2,%0 \n"
- " jz 1f \n"
- " lock; addl %2,%0 \n"
- " dec %1 \n"
- "1:"
- : "=m" (rw->lock), "=r" (rc) : "i" (RW_LOCK_BIAS), "1" (1)
- : "memory" );
-
- return rc;
+ return (cmpxchg(&rw->lock, 0, RW_WRITE_BIAS) == 0);
}
static always_inline void _raw_read_unlock(raw_rwlock_t *rw)
@@ -92,11 +70,11 @@ static always_inline void _raw_read_unlo
static always_inline void _raw_write_unlock(raw_rwlock_t *rw)
{
asm volatile (
- "lock ; addl %1,%0"
- : "=m" ((rw)->lock) : "i" (RW_LOCK_BIAS) : "memory" );
+ "lock ; subl %1,%0"
+ : "=m" ((rw)->lock) : "i" (RW_WRITE_BIAS) : "memory" );
}
-#define _raw_rw_is_locked(x) ((x)->lock < RW_LOCK_BIAS)
-#define _raw_rw_is_write_locked(x) ((x)->lock <= 0)
+#define _raw_rw_is_locked(x) ((x)->lock != 0)
+#define _raw_rw_is_write_locked(x) ((x)->lock > 0)
#endif /* __ASM_SPINLOCK_H */
Index: xen-4.1.2-testing/xen/include/xen/spinlock.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/xen/spinlock.h
+++ xen-4.1.2-testing/xen/include/xen/spinlock.h
@@ -157,6 +157,7 @@ unsigned long _read_lock_irqsave(rwlock_
void _read_unlock(rwlock_t *lock);
void _read_unlock_irq(rwlock_t *lock);
void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags);
+int _read_trylock(rwlock_t *lock);
void _write_lock(rwlock_t *lock);
void _write_lock_irq(rwlock_t *lock);
@@ -210,6 +211,7 @@ int _rw_is_write_locked(rwlock_t *lock);
#define read_unlock(l) _read_unlock(l)
#define read_unlock_irq(l) _read_unlock_irq(l)
#define read_unlock_irqrestore(l, f) _read_unlock_irqrestore(l, f)
+#define read_trylock(l) _read_trylock(l)
#define write_lock(l) _write_lock(l)
#define write_lock_irq(l) _write_lock_irq(l)
++++++ 23127-vtd-bios-settings.patch ++++++
# HG changeset patch
# User Allen Kay
# Date 1301755765 -3600
# Node ID 1046830079376a4b29fcad0cd037a834e808ed06
# Parent 89c23f58aa986092da0c9a7dfac1c41befbe1f3f
[VTD] check BIOS settings before enabling interrupt remapping or x2apic
Check flags field in ACPI DMAR structure before enabling interrupt
remapping or x2apic. This allows platform vendors to disable
interrupt remapping or x2apic features if on board BIOS does not
support them.
Signed-off-by: Allen Kay
# HG changeset patch
# User Allen Kay
# Date 1302077462 -3600
# Node ID c7916d6f4dfba9d6c7eeb0fc2796068d75e2fb4a
# Parent 42fa70e0761bbb0596618ca5323664f31a2faa76
[VTD] Fixes to ACPI DMAR flag checks.
* platform_supports_{intremap,x2apic} should not be marked __init as
they are used during S3 resume.
* DMAR flags should be taken from the table passed to
acpi_parse_dmar() -- this is the trusted copy of the DMAR, when
running in TXT mode.
Signed-off-by: Allen Kay
Index: xen-4.1.4-testing/xen/arch/x86/apic.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/apic.c
+++ xen-4.1.4-testing/xen/arch/x86/apic.c
@@ -572,7 +572,7 @@ static void resume_x2apic(void)
mask_8259A();
mask_IO_APIC_setup(ioapic_entries);
- iommu_enable_IR();
+ iommu_enable_x2apic_IR();
__enable_x2apic();
restore_IO_APIC_setup(ioapic_entries);
@@ -789,7 +789,7 @@ int lapic_suspend(void)
local_irq_save(flags);
disable_local_APIC();
- iommu_disable_IR();
+ iommu_disable_x2apic_IR();
local_irq_restore(flags);
return 0;
}
@@ -1035,7 +1035,7 @@ void __init x2apic_bsp_setup(void)
mask_8259A();
mask_IO_APIC_setup(ioapic_entries);
- if ( iommu_enable_IR() )
+ if ( iommu_enable_x2apic_IR() )
{
if ( x2apic_enabled )
panic("Interrupt remapping could not be enabled while "
Index: xen-4.1.4-testing/xen/drivers/passthrough/vtd/dmar.c
===================================================================
--- xen-4.1.4-testing.orig/xen/drivers/passthrough/vtd/dmar.c
+++ xen-4.1.4-testing/xen/drivers/passthrough/vtd/dmar.c
@@ -46,6 +46,7 @@ LIST_HEAD(acpi_rmrr_units);
LIST_HEAD(acpi_atsr_units);
LIST_HEAD(acpi_rhsa_units);
+static int __read_mostly dmar_flags;
static u64 igd_drhd_address;
u8 dmar_host_address_width;
@@ -684,6 +685,7 @@ static int __init acpi_parse_dmar(struct
int ret = 0;
dmar = (struct acpi_table_dmar *)table;
+ dmar_flags = dmar->flags;
if ( !iommu_enabled )
{
@@ -804,3 +806,22 @@ void acpi_dmar_zap(void)
dmar_table->signature[0] = 'X';
dmar_table->checksum -= 'X'-'D';
}
+
+int platform_supports_intremap(void)
+{
+ unsigned int flags = 0;
+
+ flags = DMAR_INTR_REMAP;
+ return ((dmar_flags & flags) == DMAR_INTR_REMAP);
+}
+
+int platform_supports_x2apic(void)
+{
+ unsigned int flags = 0;
+
+ if (!cpu_has_x2apic)
+ return 0;
+
+ flags = DMAR_INTR_REMAP | DMAR_X2APIC_OPT_OUT;
+ return ((dmar_flags & flags) == DMAR_INTR_REMAP);
+}
Index: xen-4.1.4-testing/xen/drivers/passthrough/vtd/extern.h
===================================================================
--- xen-4.1.4-testing.orig/xen/drivers/passthrough/vtd/extern.h
+++ xen-4.1.4-testing/xen/drivers/passthrough/vtd/extern.h
@@ -87,5 +87,7 @@ void vtd_ops_preamble_quirk(struct iommu
void vtd_ops_postamble_quirk(struct iommu* iommu);
void me_wifi_quirk(struct domain *domain, u8 bus, u8 devfn, int map);
void pci_vtd_quirk(struct pci_dev *pdev);
+int platform_supports_intremap(void);
+int platform_supports_x2apic(void);
#endif // _VTD_EXTERN_H_
Index: xen-4.1.4-testing/xen/drivers/passthrough/vtd/intremap.c
===================================================================
--- xen-4.1.4-testing.orig/xen/drivers/passthrough/vtd/intremap.c
+++ xen-4.1.4-testing/xen/drivers/passthrough/vtd/intremap.c
@@ -741,6 +741,13 @@ int enable_intremap(struct iommu *iommu,
ASSERT(ecap_intr_remap(iommu->ecap) && iommu_intremap);
+ if ( !platform_supports_intremap() )
+ {
+ dprintk(XENLOG_ERR VTDPREFIX,
+ "Platform firmware does not support interrupt remapping\n");
+ return -EINVAL;
+ }
+
ir_ctrl = iommu_ir_ctrl(iommu);
sts = dmar_readl(iommu->reg, DMAR_GSTS_REG);
@@ -847,10 +854,10 @@ out:
}
/*
- * This function is used to enable Interrutp remapping when
+ * This function is used to enable Interrupt remapping when
* enable x2apic
*/
-int iommu_enable_IR(void)
+int iommu_enable_x2apic_IR(void)
{
struct acpi_drhd_unit *drhd;
struct iommu *iommu;
@@ -858,6 +865,9 @@ int iommu_enable_IR(void)
if ( !iommu_supports_eim() )
return -1;
+ if ( !platform_supports_x2apic() )
+ return -1;
+
for_each_drhd_unit ( drhd )
{
struct qi_ctrl *qi_ctrl = NULL;
@@ -907,7 +917,7 @@ int iommu_enable_IR(void)
* This function is used to disable Interrutp remapping when
* suspend local apic
*/
-void iommu_disable_IR(void)
+void iommu_disable_x2apic_IR(void)
{
struct acpi_drhd_unit *drhd;
Index: xen-4.1.4-testing/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- xen-4.1.4-testing.orig/xen/drivers/passthrough/vtd/iommu.c
+++ xen-4.1.4-testing/xen/drivers/passthrough/vtd/iommu.c
@@ -2097,7 +2097,7 @@ static int init_vtd_hw(void)
{
iommu_intremap = 0;
dprintk(XENLOG_WARNING VTDPREFIX,
- "Failed to enable Interrupt Remapping!\n");
+ "Interrupt Remapping not enabled\n");
break;
}
}
Index: xen-4.1.4-testing/xen/drivers/passthrough/vtd/iommu.h
===================================================================
--- xen-4.1.4-testing.orig/xen/drivers/passthrough/vtd/iommu.h
+++ xen-4.1.4-testing/xen/drivers/passthrough/vtd/iommu.h
@@ -22,6 +22,10 @@
#include
+/* DMAR Flags bits */
+#define DMAR_INTR_REMAP 0x1
+#define DMAR_X2APIC_OPT_OUT 0x2
+
/*
* Intel IOMMU register specification per version 1.0 public spec.
*/
Index: xen-4.1.4-testing/xen/include/xen/iommu.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/xen/iommu.h
+++ xen-4.1.4-testing/xen/include/xen/iommu.h
@@ -66,8 +66,8 @@ struct iommu {
int iommu_setup(void);
int iommu_supports_eim(void);
-int iommu_enable_IR(void);
-void iommu_disable_IR(void);
+int iommu_enable_x2apic_IR(void);
+void iommu_disable_x2apic_IR(void);
int iommu_add_device(struct pci_dev *pdev);
int iommu_remove_device(struct pci_dev *pdev);
++++++ 23128-xentrace_correct_formula_to_calculate_t_info_pages.patch ++++++
changeset: 23128:4a335f1000ea
user: Olaf Hering
date: Sat Apr 02 15:50:19 2011 +0100
files: xen/common/trace.c
description:
xentrace: correct formula to calculate t_info_pages
The current formula to calculate t_info_pages, based on the initial
code, is slightly incorrect. It may allocate more than needed.
Each cpu has some pages/mfns stored as uint32_t.
That list is stored with an offset at tinfo.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -110,7 +110,7 @@ static int calculate_tbuf_size(unsigned
{
struct t_buf dummy;
typeof(dummy.prod) size;
- unsigned int t_info_words, t_info_bytes;
+ unsigned int t_info_words;
/* force maximum value for an unsigned type */
size = -1;
@@ -125,9 +125,8 @@ static int calculate_tbuf_size(unsigned
pages = size;
}
- t_info_words = num_online_cpus() * pages + t_info_first_offset;
- t_info_bytes = t_info_words * sizeof(uint32_t);
- t_info_pages = PFN_UP(t_info_bytes);
+ t_info_words = num_online_cpus() * pages * sizeof(uint32_t);
+ t_info_pages = PFN_UP(t_info_first_offset + t_info_words);
printk(XENLOG_INFO "xentrace: requesting %u t_info pages "
"for %u trace pages on %u cpus\n",
t_info_pages, pages, num_online_cpus());
++++++ 23129-xentrace_remove_unneeded_debug_printk.patch ++++++
changeset: 23129:219ba19aedeb
user: Olaf Hering
date: Sat Apr 02 15:50:47 2011 +0100
files: xen/common/trace.c
description:
xentrace: remove unneeded debug printk
The pointer value in case of an allocation failure is rather
uninteresting.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 1 -
1 file changed, 1 deletion(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -238,7 +238,6 @@ out_dealloc:
{
void *rawbuf = per_cpu(t_bufs, cpu);
per_cpu(t_bufs, cpu) = NULL;
- printk(XENLOG_DEBUG "xentrace: cpu %d p %p\n", cpu, rawbuf);
if ( rawbuf )
{
ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
++++++ 23173-xentrace_Move_register_cpu_notifier_call_into_boot-time_init..patch ++++++
changeset: 23173:94cef9aaf0cd
user: Keir Fraser
date: Wed Apr 06 15:52:50 2011 +0100
files: xen/common/trace.c
description:
xentrace: Move register_cpu_notifier() call into boot-time init.
We can't do it lazily from alloc_trace_bufs() as that gets called
later if tracing is enabled later by dom0.
Signed-off-by: Keir Fraser
---
xen/common/trace.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -225,8 +225,6 @@ static int alloc_trace_bufs(unsigned int
t_buf_highwater = data_size >> 1; /* 50% high water */
opt_tbuf_size = pages;
- register_cpu_notifier(&cpu_nfb);
-
printk("xentrace: initialised\n");
wmb(); /* above must be visible before tb_init_done flag set */
tb_init_done = 1;
@@ -309,6 +307,8 @@ int trace_will_trace_event(u32 event)
*/
void __init init_trace_bufs(void)
{
+ register_cpu_notifier(&cpu_nfb);
+
if ( opt_tbuf_size && alloc_trace_bufs(opt_tbuf_size) )
{
printk(XENLOG_INFO "xentrace: allocation size %d failed, disabling\n",
++++++ 23199-amd-iommu-unmapped-intr-fault.patch ++++++
# HG changeset patch
# User Wei Wang
# Date 1302610857 -3600
# Node ID dbd98ab2f87facba8117bb881fa2ea5dfdb92960
# Parent 697ac895c11c6d5d82524de56796cee98fded2a5
amd iommu: Unmapped interrupt should generate IO page faults.
This helps us to debug interrupt issues.
Signed-off-by: Wei Wang
Index: xen-4.1.2-testing/xen/drivers/passthrough/amd/iommu_map.c
===================================================================
--- xen-4.1.2-testing.orig/xen/drivers/passthrough/amd/iommu_map.c
+++ xen-4.1.2-testing/xen/drivers/passthrough/amd/iommu_map.c
@@ -327,8 +327,9 @@ void amd_iommu_set_intremap_table(u32 *d
set_field_in_reg_u32(0xB, entry,
IOMMU_DEV_TABLE_INT_TABLE_LENGTH_MASK,
IOMMU_DEV_TABLE_INT_TABLE_LENGTH_SHIFT, &entry);
- /* ignore unmapped interrupts */
- set_field_in_reg_u32(IOMMU_CONTROL_ENABLED, entry,
+
+ /* unmapped interrupt results io page faults*/
+ set_field_in_reg_u32(IOMMU_CONTROL_DISABLED, entry,
IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_MASK,
IOMMU_DEV_TABLE_INT_TABLE_IGN_UNMAPPED_SHIFT, &entry);
set_field_in_reg_u32(int_valid ? IOMMU_CONTROL_ENABLED :
++++++ 23236-svm-decode-assist-invlpg.patch ++++++
References: FATE#309900
# HG changeset patch
# User Christoph Egger
# Date 1302700499 -3600
# Node ID 3b2182100ba2fa5c4a3a450e473717e2300aa8f1
# Parent 2284c79b606ac14ef5c5bc2c1cce62188b5bd9ee
x86/svm/asid: Introduce svm_invlpga()
Signed-off-by: Christoph Egger
# HG changeset patch
# User Andre Przywara
# Date 1303117597 -3600
# Node ID e324c4d1dd6eeb9417fec513640ca795bd0f5dd4
# Parent 2c8ad607ece18b4740b9fc4ffe267a0e0893c141
svm: implement INVLPG part of DecodeAssist
Newer SVM implementations (Bulldozer) give the desired address on
an INVLPG intercept explicitly in the EXITINFO1 field of the VMCB.
Use this address to avoid a costly instruction fetch and decode
cycle.
Signed-off-by: Andre Przywara
# HG changeset patch
# User Christoph Egger
# Date 1305187246 -3600
# Node ID 19d6541c4abec3486c83de76102ec46d7fe22a16
# Parent b6e8e916ed2827fb1329de0de2e23ee5b6b78662
nestedsvm: update rip on invlpga intercept
Fixes endless loop.
Signed-off-by: Christoph Egger
Index: xen-4.1.4-testing/xen/arch/x86/hvm/svm/emulate.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/hvm/svm/emulate.c
+++ xen-4.1.4-testing/xen/arch/x86/hvm/svm/emulate.c
@@ -102,6 +102,7 @@ MAKE_INSTR(INT3, 1, 0xcc);
MAKE_INSTR(RDTSC, 2, 0x0f, 0x31);
MAKE_INSTR(PAUSE, 1, 0x90);
MAKE_INSTR(XSETBV, 3, 0x0f, 0x01, 0xd1);
+MAKE_INSTR(INVLPGA,3, 0x0f, 0x01, 0xdf);
static const u8 *opc_bytes[INSTR_MAX_COUNT] =
{
@@ -116,6 +117,7 @@ static const u8 *opc_bytes[INSTR_MAX_COU
[INSTR_RDTSC] = OPCODE_RDTSC,
[INSTR_PAUSE] = OPCODE_PAUSE,
[INSTR_XSETBV] = OPCODE_XSETBV,
+ [INSTR_INVLPGA] = OPCODE_INVLPGA,
};
static int fetch(struct vcpu *v, u8 *buf, unsigned long addr, int len)
Index: xen-4.1.4-testing/xen/arch/x86/hvm/svm/svm.c
===================================================================
--- xen-4.1.4-testing.orig/xen/arch/x86/hvm/svm/svm.c
+++ xen-4.1.4-testing/xen/arch/x86/hvm/svm/svm.c
@@ -1772,8 +1772,10 @@ void svm_vmexit_handler(struct cpu_user_
break;
case VMEXIT_INVLPGA:
- if ( !handle_mmio() )
- hvm_inject_exception(TRAP_gp_fault, 0, 0);
+ if ( (inst_len = __get_instruction_length(v, INSTR_INVLPGA)) == 0 )
+ break;
+ svm_invlpga(regs->eax, v->arch.hvm_vcpu.asid);
+ __update_guest_eip(regs, inst_len);
break;
case VMEXIT_VMMCALL:
Index: xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/asid.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/asm-x86/hvm/svm/asid.h
+++ xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/asid.h
@@ -34,10 +34,7 @@ static inline void svm_asid_g_invlpg(str
{
#if 0
/* Optimization? */
- asm volatile (".byte 0x0F,0x01,0xDF \n"
- : /* output */
- : /* input */
- "a" (g_vaddr), "c"(v->arch.hvm_svm.vmcb->guest_asid) );
+ svm_invlpga(g_vaddr, v->arch.hvm_svm.vmcb->guest_asid);
#endif
/* Safe fallback. Take a new ASID. */
Index: xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/emulate.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/asm-x86/hvm/svm/emulate.h
+++ xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/emulate.h
@@ -33,6 +33,7 @@ enum instruction_index {
INSTR_RDTSC,
INSTR_PAUSE,
INSTR_XSETBV,
+ INSTR_INVLPGA,
INSTR_MAX_COUNT /* Must be last - Number of instructions supported */
};
Index: xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/svm.h
===================================================================
--- xen-4.1.4-testing.orig/xen/include/asm-x86/hvm/svm/svm.h
+++ xen-4.1.4-testing/xen/include/asm-x86/hvm/svm/svm.h
@@ -62,6 +62,15 @@ static inline void svm_vmsave(void *vmcb
: : "a" (__pa(vmcb)) : "memory" );
}
+static inline void svm_invlpga(unsigned long vaddr, uint32_t asid)
+{
+ asm volatile (
+ ".byte 0x0f,0x01,0xdf"
+ : /* output */
+ : /* input */
+ "a" (vaddr), "c" (asid));
+}
+
extern u32 svm_feature_flags;
#define SVM_FEATURE_NPT 0 /* Nested page table support */
++++++ 23239-xentrace_correct_overflow_check_for_number_of_per-cpu_trace_pages.patch ++++++
changeset: 23239:51d89366c859
user: Olaf Hering
date: Mon Apr 18 15:12:04 2011 +0100
files: xen/common/trace.c
description:
xentrace: correct overflow check for number of per-cpu trace pages
The calculated number of per-cpu trace pages is stored in t_info and
shared with tools like xentrace. Since it's a u16 the value may
overflow because the current check is based on u32. Using the u16
means each cpu could in theory use up to 256MB as trace
buffer. However, such a large allocation will currently fail on x86 due
to the MAX_ORDER limit. Check both max theoretical number of pages
per cpu and max number of pages reachable by struct t_buf->prod/cons
variables with requested number of pages.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 22 +++++++++++++++-------
1 file changed, 15 insertions(+), 7 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -104,25 +104,33 @@ static void calc_tinfo_first_offset(void
* calculate_tbuf_size - check to make sure that the proposed size will fit
* in the currently sized struct t_info and allows prod and cons to
* reach double the value without overflow.
+ * The t_info layout is fixed and cant be changed without breaking xentrace.
* Initialize t_info_pages based on number of trace pages.
*/
static int calculate_tbuf_size(unsigned int pages)
{
- struct t_buf dummy;
- typeof(dummy.prod) size;
+ struct t_buf dummy_size;
+ typeof(dummy_size.prod) max_size;
+ struct t_info dummy_pages;
+ typeof(dummy_pages.tbuf_size) max_pages;
unsigned int t_info_words;
/* force maximum value for an unsigned type */
- size = -1;
+ max_size = -1;
+ max_pages = -1;
/* max size holds up to n pages */
- size /= PAGE_SIZE;
- if ( pages > size )
+ max_size /= PAGE_SIZE;
+
+ if ( max_size < max_pages )
+ max_pages = max_size;
+
+ if ( pages > max_pages )
{
printk(XENLOG_INFO "xentrace: requested number of %u pages "
"reduced to %u\n",
- pages, (unsigned int)size);
- pages = size;
+ pages, max_pages);
+ pages = max_pages;
}
t_info_words = num_online_cpus() * pages * sizeof(uint32_t);
++++++ 23246-x86-xsave-enable.patch ++++++
References: bnc#718668
# HG changeset patch
# User Tim Deegan
# Date 1303297371 -3600
# Node ID eb4505f8dd97f894ee4b4e1b55ea1272c05e6759
# Parent 3539ef956a378ad7fe39654ff9aca5b0e7bf8843
xen/x86: re-enable xsave by default now that it supports live migration.
Signed-off-by: Tim Deegan
Index: xen-4.1.3-testing/xen/arch/x86/cpu/common.c
===================================================================
--- xen-4.1.3-testing.orig/xen/arch/x86/cpu/common.c
+++ xen-4.1.3-testing/xen/arch/x86/cpu/common.c
@@ -25,7 +25,7 @@ boolean_param("nofxsr", disable_x86_fxsr
static bool_t __cpuinitdata disable_x86_serial_nr;
boolean_param("noserialnumber", disable_x86_serial_nr);
-static bool_t __cpuinitdata use_xsave;
+static bool_t __cpuinitdata use_xsave = 1;
boolean_param("xsave", use_xsave);
unsigned int __devinitdata opt_cpuid_mask_ecx = ~0u;
++++++ 23303-cpufreq-misc.patch ++++++
# HG changeset patch
# User Jan Beulich
# Date 1304930762 -3600
# Node ID 82180954eda9cfe279e7ecf8c9ed4ffa29796bfb
# Parent c822888f36568f26e95f9844c7f0c5e06df7aa20
misc cpufreq cleanup
- proper handling of governor command line options when using the
default governor
- warning message for unrecognized command line options
- replacing a NR_CPUS sized array with per-CPU data
- a couple of __read_mostly annotations
Signed-off-by: Jan Beulich
Index: xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq.c
===================================================================
--- xen-4.1.2-testing.orig/xen/drivers/cpufreq/cpufreq.c
+++ xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq.c
@@ -47,7 +47,8 @@
#include
#include
-static unsigned int usr_max_freq, usr_min_freq;
+static unsigned int __read_mostly usr_min_freq;
+static unsigned int __read_mostly usr_max_freq;
static void cpufreq_cmdline_common_para(struct cpufreq_policy *new_policy);
struct cpufreq_dom {
@@ -57,7 +58,7 @@ struct cpufreq_dom {
};
static LIST_HEAD(cpufreq_dom_list_head);
-struct cpufreq_governor *cpufreq_opt_governor;
+struct cpufreq_governor *__read_mostly cpufreq_opt_governor;
LIST_HEAD(cpufreq_governor_list);
bool_t __read_mostly cpufreq_verbose;
@@ -543,6 +544,7 @@ void __init cpufreq_cmdline_parse(char *
{
static struct cpufreq_governor *__initdata cpufreq_governors[] =
{
+ CPUFREQ_DEFAULT_GOVERNOR,
&cpufreq_gov_userspace,
&cpufreq_gov_dbs,
&cpufreq_gov_performance,
@@ -576,8 +578,10 @@ void __init cpufreq_cmdline_parse(char *
}
if (str && !cpufreq_handle_common_option(str, val) &&
- cpufreq_governors[gov_index]->handle_option)
- cpufreq_governors[gov_index]->handle_option(str, val);
+ (!cpufreq_governors[gov_index]->handle_option ||
+ !cpufreq_governors[gov_index]->handle_option(str, val)))
+ printk(XENLOG_WARNING "cpufreq/%s: option '%s' not recognized\n",
+ cpufreq_governors[gov_index]->name, str);
str = end;
} while (str);
Index: xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq_misc_governors.c
===================================================================
--- xen-4.1.2-testing.orig/xen/drivers/cpufreq/cpufreq_misc_governors.c
+++ xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq_misc_governors.c
@@ -14,14 +14,17 @@
*
*/
+#include
#include
+#include
#include
#include
/*
* cpufreq userspace governor
*/
-static unsigned int cpu_set_freq[NR_CPUS];
+static unsigned int __read_mostly userspace_cmdline_freq;
+static DEFINE_PER_CPU(unsigned int, cpu_set_freq);
static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
unsigned int event)
@@ -35,21 +38,21 @@ static int cpufreq_governor_userspace(st
switch (event) {
case CPUFREQ_GOV_START:
- if (!cpu_set_freq[cpu])
- cpu_set_freq[cpu] = policy->cur;
+ if (!per_cpu(cpu_set_freq, cpu))
+ per_cpu(cpu_set_freq, cpu) = policy->cur;
break;
case CPUFREQ_GOV_STOP:
- cpu_set_freq[cpu] = 0;
+ per_cpu(cpu_set_freq, cpu) = 0;
break;
case CPUFREQ_GOV_LIMITS:
- if (policy->max < cpu_set_freq[cpu])
+ if (policy->max < per_cpu(cpu_set_freq, cpu))
ret = __cpufreq_driver_target(policy, policy->max,
CPUFREQ_RELATION_H);
- else if (policy->min > cpu_set_freq[cpu])
+ else if (policy->min > per_cpu(cpu_set_freq, cpu))
ret = __cpufreq_driver_target(policy, policy->min,
CPUFREQ_RELATION_L);
else
- ret = __cpufreq_driver_target(policy, cpu_set_freq[cpu],
+ ret = __cpufreq_driver_target(policy, per_cpu(cpu_set_freq, cpu),
CPUFREQ_RELATION_L);
break;
@@ -68,7 +71,7 @@ int write_userspace_scaling_setspeed(uns
if (!cpu_online(cpu) || !(policy = per_cpu(cpufreq_cpu_policy, cpu)))
return -EINVAL;
- cpu_set_freq[cpu] = freq;
+ per_cpu(cpu_set_freq, cpu) = freq;
if (freq < policy->min)
freq = policy->min;
@@ -78,19 +81,35 @@ int write_userspace_scaling_setspeed(uns
return __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
}
-static void __init
+static bool_t __init
cpufreq_userspace_handle_option(const char *name, const char *val)
{
if (!strcmp(name, "speed") && val) {
- unsigned int usr_cmdline_freq;
- unsigned int cpu;
+ userspace_cmdline_freq = simple_strtoul(val, NULL, 0);
+ return 1;
+ }
+ return 0;
+}
- usr_cmdline_freq = simple_strtoul(val, NULL, 0);
- for (cpu = 0; cpu < NR_CPUS; cpu++)
- cpu_set_freq[cpu] = usr_cmdline_freq;
+static int cpufreq_userspace_cpu_callback(
+ struct notifier_block *nfb, unsigned long action, void *hcpu)
+{
+ unsigned int cpu = (unsigned long)hcpu;
+
+ switch (action)
+ {
+ case CPU_UP_PREPARE:
+ per_cpu(cpu_set_freq, cpu) = userspace_cmdline_freq;
+ break;
}
+
+ return NOTIFY_DONE;
}
+static struct notifier_block cpufreq_userspace_cpu_nfb = {
+ .notifier_call = cpufreq_userspace_cpu_callback
+};
+
struct cpufreq_governor cpufreq_gov_userspace = {
.name = "userspace",
.governor = cpufreq_governor_userspace,
@@ -99,6 +118,11 @@ struct cpufreq_governor cpufreq_gov_user
static int __init cpufreq_gov_userspace_init(void)
{
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu)
+ per_cpu(cpu_set_freq, cpu) = userspace_cmdline_freq;
+ register_cpu_notifier(&cpufreq_userspace_cpu_nfb);
return cpufreq_register_governor(&cpufreq_gov_userspace);
}
__initcall(cpufreq_gov_userspace_init);
Index: xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq_ondemand.c
===================================================================
--- xen-4.1.2-testing.orig/xen/drivers/cpufreq/cpufreq_ondemand.c
+++ xen-4.1.2-testing/xen/drivers/cpufreq/cpufreq_ondemand.c
@@ -296,7 +296,7 @@ int cpufreq_governor_dbs(struct cpufreq_
return 0;
}
-static void __init cpufreq_dbs_handle_option(const char *name, const char *val)
+static bool_t __init cpufreq_dbs_handle_option(const char *name, const char *val)
{
if ( !strcmp(name, "rate") && val )
{
@@ -334,6 +334,9 @@ static void __init cpufreq_dbs_handle_op
}
dbs_tuners_ins.powersave_bias = tmp;
}
+ else
+ return 0;
+ return 1;
}
struct cpufreq_governor cpufreq_gov_dbs = {
Index: xen-4.1.2-testing/xen/include/acpi/cpufreq/cpufreq.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/acpi/cpufreq/cpufreq.h
+++ xen-4.1.2-testing/xen/include/acpi/cpufreq/cpufreq.h
@@ -93,7 +93,7 @@ struct cpufreq_governor {
char name[CPUFREQ_NAME_LEN];
int (*governor)(struct cpufreq_policy *policy,
unsigned int event);
- void (*handle_option)(const char *name, const char *value);
+ bool_t (*handle_option)(const char *name, const char *value);
struct list_head governor_list;
};
++++++ 23308-xentrace_Move_the_global_variable_t_info_first_offset_into_calculate_tbuf_size.patch ++++++
changeset: 23308:fb5313e64335
user: Olaf Hering
date: Mon May 09 09:58:36 2011 +0100
files: xen/common/trace.c
description:
xentrace: Move the global variable t_info_first_offset into calculate_tbuf_size()
Move the global variable t_info_first_offset into
calculate_tbuf_size() because it is only used there. Change the type
from u32 to uint32_t to match type in other places.
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -55,7 +55,6 @@ static DEFINE_PER_CPU_READ_MOSTLY(struct
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
static u32 data_size;
-static u32 t_info_first_offset __read_mostly;
/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
@@ -94,10 +93,10 @@ static struct notifier_block cpu_nfb = {
.notifier_call = cpu_callback
};
-static void calc_tinfo_first_offset(void)
+static uint32_t calc_tinfo_first_offset(void)
{
int offset_in_bytes = offsetof(struct t_info, mfn_offset[NR_CPUS]);
- t_info_first_offset = fit_to_type(uint32_t, offset_in_bytes);
+ return fit_to_type(uint32_t, offset_in_bytes);
}
/**
@@ -107,7 +106,7 @@ static void calc_tinfo_first_offset(void
* The t_info layout is fixed and cant be changed without breaking xentrace.
* Initialize t_info_pages based on number of trace pages.
*/
-static int calculate_tbuf_size(unsigned int pages)
+static int calculate_tbuf_size(unsigned int pages, uint32_t t_info_first_offset)
{
struct t_buf dummy_size;
typeof(dummy_size.prod) max_size;
@@ -156,6 +155,7 @@ static int alloc_trace_bufs(unsigned int
int i, cpu, order;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list;
+ uint32_t t_info_first_offset;
int offset;
if ( t_info )
@@ -165,9 +165,9 @@ static int alloc_trace_bufs(unsigned int
return -EINVAL;
/* Calculate offset in u32 of first mfn */
- calc_tinfo_first_offset();
+ t_info_first_offset = calc_tinfo_first_offset();
- pages = calculate_tbuf_size(pages);
+ pages = calculate_tbuf_size(pages, t_info_first_offset);
order = get_order_from_pages(pages);
t_info = alloc_xenheap_pages(get_order_from_pages(t_info_pages), 0);
++++++ 23309-xentrace_Mark_data_size___read_mostly_because_its_only_written_once.patch ++++++
changeset: 23309:0ddcc8063690
user: Olaf Hering
date: Mon May 09 09:59:13 2011 +0100
files: xen/common/trace.c
description:
xentrace: Mark data_size __read_mostly because it's only written once
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -54,7 +54,7 @@ static unsigned int t_info_pages;
static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
-static u32 data_size;
+static u32 data_size __read_mostly;
/* High water mark for trace buffers; */
/* Send virtual interrupt when buffer level reaches this point */
++++++ 23310-xentrace_Remove_unneeded_cast_when_assigning_pointer_value_to_dst.patch ++++++
changeset: 23310:b7ca55907bd3
user: Olaf Hering
date: Mon May 09 09:59:50 2011 +0100
files: xen/common/trace.c
description:
xentrace: Remove unneeded cast when assigning pointer value to dst
Remove unneeded cast when assigning pointer value to dst.
Both arrays are uint32_t and memcpy takes a void pointer.
Signed-off-by: Olaf Hering
---
xen/common/trace.c | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -483,7 +483,7 @@ static inline void __insert_record(struc
const void *extra_data)
{
struct t_rec *rec;
- unsigned char *dst;
+ uint32_t *dst;
unsigned int extra_word = extra / sizeof(u32);
unsigned int local_rec_size = calc_rec_size(cycles, extra);
uint32_t next;
@@ -508,13 +508,13 @@ static inline void __insert_record(struc
rec->event = event;
rec->extra_u32 = extra_word;
- dst = (unsigned char *)rec->u.nocycles.extra_u32;
+ dst = rec->u.nocycles.extra_u32;
if ( (rec->cycles_included = cycles) != 0 )
{
u64 tsc = (u64)get_cycles();
rec->u.cycles.cycles_lo = (uint32_t)tsc;
rec->u.cycles.cycles_hi = (uint32_t)(tsc >> 32);
- dst = (unsigned char *)rec->u.cycles.extra_u32;
+ dst = rec->u.cycles.extra_u32;
}
if ( extra_data && extra )
++++++ 23334-amd-fam12+14-vpmu.patch ++++++
References: FATE#309902, FATE#309903
# HG changeset patch
# User Jacob Shin
# Date 1305188046 -3600
# Node ID 23e33ea79cac0303c729d4e82905054cded16348
# Parent fabdd682420c0c7b5e81f07f2f54211ebc11babe
hvm: vpmu: Enable HVM VPMU for AMD Family 12h and 14h processors
HVM VPMU support can be enabled for AMD Family 12h and 14h processors
by taking the same code path as 10h.
Signed-off-by: Jacob Shin
Index: xen-4.1.2-testing/xen/arch/x86/hvm/svm/vpmu.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/svm/vpmu.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/svm/vpmu.c
@@ -317,6 +317,8 @@ static void amd_vpmu_initialise(struct v
k7_counters_mirrored = 1;
break;
case 0x10:
+ case 0x12:
+ case 0x14:
default:
num_counters = F10H_NUM_COUNTERS;
counters = AMD_F10H_COUNTERS;
Index: xen-4.1.2-testing/xen/arch/x86/hvm/vpmu.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/hvm/vpmu.c
+++ xen-4.1.2-testing/xen/arch/x86/hvm/vpmu.c
@@ -101,6 +101,8 @@ void vpmu_initialise(struct vcpu *v)
switch ( family )
{
case 0x10:
+ case 0x12:
+ case 0x14:
case 0x15:
vpmu->arch_vpmu_ops = &amd_vpmu_ops;
break;
++++++ 23383-libxc-rm-static-vars.patch ++++++
# HG changeset patch
# User Ian Campbell
# Date 1306228450 -3600
# Node ID 23b423a3955785c9a8679c3a877c3472066a2e1f
# Parent ba8da39c67298b19c2c277e5794981b7f23bedf2
libxc: save/restore: remove static context variables
20544:ad9d75d74bd5 and 20545:cc7d66ba0dad seemingly intended to change these
global static variables into stack variables but didn't remove the static
qualifier.
Also zero the entire struct once with memset rather than clearing fields
piecemeal in two different places.
Signed-off-by: Ian Campbell
Acked-by: Ian Jackson
Committed-by: Ian Jackson
Acked-by: Vincent Hanquez
Index: xen-4.1.3-testing/tools/libxc/xc_domain_restore.c
===================================================================
--- xen-4.1.3-testing.orig/tools/libxc/xc_domain_restore.c
+++ xen-4.1.3-testing/tools/libxc/xc_domain_restore.c
@@ -1145,23 +1145,19 @@ int xc_domain_restore(xc_interface *xch,
int orig_io_fd_flags;
- static struct restore_ctx _ctx = {
- .live_p2m = NULL,
- .p2m = NULL,
- };
- static struct restore_ctx *ctx = &_ctx;
+ struct restore_ctx _ctx;
+ struct restore_ctx *ctx = &_ctx;
struct domain_info_context *dinfo = &ctx->dinfo;
pagebuf_init(&pagebuf);
memset(&tailbuf, 0, sizeof(tailbuf));
tailbuf.ishvm = hvm;
- /* For info only */
- ctx->nr_pfns = 0;
-
if ( superpages )
return 1;
+ memset(ctx, 0, sizeof(*ctx));
+
ctxt = xc_hypercall_buffer_alloc(xch, ctxt, sizeof(*ctxt));
if ( ctxt == NULL )
Index: xen-4.1.3-testing/tools/libxc/xc_domain_save.c
===================================================================
--- xen-4.1.3-testing.orig/tools/libxc/xc_domain_save.c
+++ xen-4.1.3-testing/tools/libxc/xc_domain_save.c
@@ -958,11 +958,8 @@ int xc_domain_save(xc_interface *xch, in
unsigned long mfn;
struct outbuf ob;
- static struct save_ctx _ctx = {
- .live_p2m = NULL,
- .live_m2p = NULL,
- };
- static struct save_ctx *ctx = &_ctx;
+ struct save_ctx _ctx;
+ struct save_ctx *ctx = &_ctx;
struct domain_info_context *dinfo = &ctx->dinfo;
int completed = 0;
@@ -976,6 +973,8 @@ int xc_domain_save(xc_interface *xch, in
outbuf_init(xch, &ob, OUTBUF_SIZE);
+ memset(ctx, 0, sizeof(*ctx));
+
/* If no explicit control parameters given, use defaults */
max_iters = max_iters ? : DEF_MAX_ITERS;
max_factor = max_factor ? : DEF_MAX_FACTOR;
++++++ 23404-xentrace_reduce_trace_buffer_size_to_something_mfn_offset_can_reach.patch ++++++
changeset: 23404:dd0eb070ee44
user: Olaf Hering
date: Thu May 26 12:34:44 2011 +0100
files: xen/common/trace.c
description:
xentrace: reduce trace buffer size to something mfn_offset can reach
The start of the array which holds the list of mfns for each cpus
tracebuffer is stored in an unsigned short. This limits the total
amount of pages for each cpu as the number of active cpus increases.
Update the math in calculate_tbuf_size() to apply also this rule to
the max number of trace pages. Without this change the index can
overflow.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
# Commit d9fb28ae6d41c8201482948660e52889481830dd
# Date 2013-03-04 13:42:17 +0100
# Author Olaf Hering
# Committer Jan Beulich
xentrace: fix off-by-one in calculate_tbuf_size
Commit "xentrace: reduce trace buffer size to something mfn_offset can
reach" contains an off-by-one bug. max_mfn_offset needs to be reduced by
exactly the value of t_info_first_offset.
If the system has two cpus and the number of requested trace pages is
very large, the final number of trace pages + the offset will not fit
into a short. As a result the variable offset in alloc_trace_bufs() will
wrap while allocating buffers for the second cpu. Later
share_xen_page_with_privileged_guests() will be called with a wrong page
and the ASSERT in this function triggers. If the ASSERT is ignored by
running a non-dbg hypervisor the asserts in xentrace itself trigger
because "cons" is not aligned because the very last trace page for the
second cpu is a random mfn.
Thanks to Jan for the quick analysis.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -112,11 +112,14 @@ static int calculate_tbuf_size(unsigned
typeof(dummy_size.prod) max_size;
struct t_info dummy_pages;
typeof(dummy_pages.tbuf_size) max_pages;
+ typeof(dummy_pages.mfn_offset[0]) max_mfn_offset;
+ unsigned int max_cpus = num_online_cpus();
unsigned int t_info_words;
/* force maximum value for an unsigned type */
max_size = -1;
max_pages = -1;
+ max_mfn_offset = -1;
/* max size holds up to n pages */
max_size /= PAGE_SIZE;
@@ -124,6 +127,18 @@ static int calculate_tbuf_size(unsigned
if ( max_size < max_pages )
max_pages = max_size;
+ /*
+ * max mfn_offset holds up to n pages per cpu
+ * The array of mfns for the highest cpu can start at the maximum value
+ * mfn_offset can hold. So reduce the number of cpus and also the mfn_offset.
+ */
+ max_mfn_offset -= t_info_first_offset;
+ max_cpus--;
+ if ( max_cpus )
+ max_mfn_offset /= max_cpus;
+ if ( max_mfn_offset < max_pages )
+ max_pages = max_mfn_offset;
+
if ( pages > max_pages )
{
printk(XENLOG_INFO "xentrace: requested number of %u pages "
++++++ 23405-xentrace_fix_type_of_offset_to_avoid_ouf-of-bounds_access.patch ++++++
changeset: 23405:3057b531d905
user: Olaf Hering
date: Thu May 26 12:35:30 2011 +0100
files: xen/common/trace.c
description:
xentrace: fix type of offset to avoid out-of-bounds access
Update the type of the local offset variable to match the type where
this variable is stored. Also update the type of t_info_first_offset
because it has also a limited range.
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -106,7 +106,7 @@ static uint32_t calc_tinfo_first_offset(
* The t_info layout is fixed and cant be changed without breaking xentrace.
* Initialize t_info_pages based on number of trace pages.
*/
-static int calculate_tbuf_size(unsigned int pages, uint32_t t_info_first_offset)
+static int calculate_tbuf_size(unsigned int pages, uint16_t t_info_first_offset)
{
struct t_buf dummy_size;
typeof(dummy_size.prod) max_size;
@@ -170,8 +170,8 @@ static int alloc_trace_bufs(unsigned int
int i, cpu, order;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list;
- uint32_t t_info_first_offset;
- int offset;
+ uint16_t t_info_first_offset;
+ uint16_t offset;
if ( t_info )
return -EBUSY;
@@ -179,7 +179,7 @@ static int alloc_trace_bufs(unsigned int
if ( pages == 0 )
return -EINVAL;
- /* Calculate offset in u32 of first mfn */
+ /* Calculate offset in units of u32 of first mfn */
t_info_first_offset = calc_tinfo_first_offset();
pages = calculate_tbuf_size(pages, t_info_first_offset);
++++++ 23406-xentrace_update___insert_record_to_copy_the_trace_record_to_individual_mfns.patch ++++++
changeset: 23406:956438803307
user: Olaf Hering
date: Thu May 26 12:36:03 2011 +0100
files: xen/common/trace.c
description:
xentrace: update __insert_record() to copy the trace record to individual mfns
Update __insert_record() to copy the trace record to individual mfns.
This is a prereq before changing the per-cpu allocation from
contiguous to non-contiguous allocation.
v2:
update offset calculation to use shift and mask
update type of mfn_offset to match type of data source
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 71 +++++++++++++++++++++++++++++++++++++++++------------
1 file changed, 55 insertions(+), 16 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -52,7 +52,6 @@ static struct t_info *t_info;
static unsigned int t_info_pages;
static DEFINE_PER_CPU_READ_MOSTLY(struct t_buf *, t_bufs);
-static DEFINE_PER_CPU_READ_MOSTLY(unsigned char *, t_data);
static DEFINE_PER_CPU_READ_MOSTLY(spinlock_t, t_lock);
static u32 data_size __read_mostly;
@@ -208,7 +207,6 @@ static int alloc_trace_bufs(unsigned int
per_cpu(t_bufs, cpu) = buf = rawbuf;
buf->cons = buf->prod = 0;
- per_cpu(t_data, cpu) = (unsigned char *)(buf + 1);
}
offset = t_info_first_offset;
@@ -472,10 +470,16 @@ static inline u32 calc_bytes_avail(const
return data_size - calc_unconsumed_bytes(buf);
}
-static inline struct t_rec *next_record(const struct t_buf *buf,
- uint32_t *next)
+static unsigned char *next_record(const struct t_buf *buf, uint32_t *next,
+ unsigned char **next_page,
+ uint32_t *offset_in_page)
{
u32 x = buf->prod, cons = buf->cons;
+ uint16_t per_cpu_mfn_offset;
+ uint32_t per_cpu_mfn_nr;
+ uint32_t *mfn_list;
+ uint32_t mfn;
+ unsigned char *this_page;
barrier(); /* must read buf->prod and buf->cons only once */
*next = x;
@@ -487,7 +491,27 @@ static inline struct t_rec *next_record(
ASSERT(x < data_size);
- return (struct t_rec *)&this_cpu(t_data)[x];
+ /* add leading header to get total offset of next record */
+ x += sizeof(struct t_buf);
+ *offset_in_page = x & ~PAGE_MASK;
+
+ /* offset into array of mfns */
+ per_cpu_mfn_nr = x >> PAGE_SHIFT;
+ per_cpu_mfn_offset = t_info->mfn_offset[smp_processor_id()];
+ mfn_list = (uint32_t *)t_info;
+ mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr];
+ this_page = mfn_to_virt(mfn);
+ if (per_cpu_mfn_nr + 1 >= opt_tbuf_size)
+ {
+ /* reached end of buffer? */
+ *next_page = NULL;
+ }
+ else
+ {
+ mfn = mfn_list[per_cpu_mfn_offset + per_cpu_mfn_nr + 1];
+ *next_page = mfn_to_virt(mfn);
+ }
+ return this_page;
}
static inline void __insert_record(struct t_buf *buf,
@@ -497,28 +521,37 @@ static inline void __insert_record(struc
unsigned int rec_size,
const void *extra_data)
{
- struct t_rec *rec;
+ struct t_rec split_rec, *rec;
uint32_t *dst;
+ unsigned char *this_page, *next_page;
unsigned int extra_word = extra / sizeof(u32);
unsigned int local_rec_size = calc_rec_size(cycles, extra);
uint32_t next;
+ uint32_t offset;
+ uint32_t remaining;
BUG_ON(local_rec_size != rec_size);
BUG_ON(extra & 3);
- rec = next_record(buf, &next);
- if ( !rec )
+ this_page = next_record(buf, &next, &next_page, &offset);
+ if ( !this_page )
return;
- /* Double-check once more that we have enough space.
- * Don't bugcheck here, in case the userland tool is doing
- * something stupid. */
- if ( (unsigned char *)rec + rec_size > this_cpu(t_data) + data_size )
+
+ remaining = PAGE_SIZE - offset;
+
+ if ( unlikely(rec_size > remaining) )
{
- if ( printk_ratelimit() )
+ if ( next_page == NULL )
+ {
+ /* access beyond end of buffer */
printk(XENLOG_WARNING
- "%s: size=%08x prod=%08x cons=%08x rec=%u\n",
- __func__, data_size, next, buf->cons, rec_size);
- return;
+ "%s: size=%08x prod=%08x cons=%08x rec=%u remaining=%u\n",
+ __func__, data_size, next, buf->cons, rec_size, remaining);
+ return;
+ }
+ rec = &split_rec;
+ } else {
+ rec = (struct t_rec*)(this_page + offset);
}
rec->event = event;
@@ -535,6 +568,12 @@ static inline void __insert_record(struc
if ( extra_data && extra )
memcpy(dst, extra_data, extra);
+ if ( unlikely(rec_size > remaining) )
+ {
+ memcpy(this_page + offset, rec, remaining);
+ memcpy(next_page, (char *)rec + remaining, rec_size - remaining);
+ }
+
wmb();
next += rec_size;
++++++ 23407-xentrace_allocate_non-contiguous_per-cpu_trace_buffers.patch ++++++
changeset: 23407:b19898ac3e32
user: Olaf Hering
date: Thu May 26 12:36:27 2011 +0100
files: xen/common/trace.c
description:
xentrace: allocate non-contiguous per-cpu trace buffers
Signed-off-by: Olaf Hering
Acked-by: George Dunlap
---
xen/common/trace.c | 92 ++++++++++++++++++++++++++++-------------------------
1 file changed, 50 insertions(+), 42 deletions(-)
Index: xen-4.1.2-testing/xen/common/trace.c
===================================================================
--- xen-4.1.2-testing.orig/xen/common/trace.c
+++ xen-4.1.2-testing/xen/common/trace.c
@@ -166,7 +166,7 @@ static int calculate_tbuf_size(unsigned
*/
static int alloc_trace_bufs(unsigned int pages)
{
- int i, cpu, order;
+ int i, cpu;
/* Start after a fixed-size array of NR_CPUS */
uint32_t *t_info_mfn_list;
uint16_t t_info_first_offset;
@@ -182,34 +182,11 @@ static int alloc_trace_bufs(unsigned int
t_info_first_offset = calc_tinfo_first_offset();
pages = calculate_tbuf_size(pages, t_info_first_offset);
- order = get_order_from_pages(pages);
t_info = alloc_xenheap_pages(get_order_from_pages(t_info_pages), 0);
if ( t_info == NULL )
- goto out_dealloc;
+ goto out_dealloc_t_info;
- /*
- * First, allocate buffers for all of the cpus. If any
- * fails, deallocate what you have so far and exit.
- */
- for_each_online_cpu(cpu)
- {
- void *rawbuf;
- struct t_buf *buf;
-
- if ( (rawbuf = alloc_xenheap_pages(
- order, MEMF_bits(32 + PAGE_SHIFT))) == NULL )
- {
- printk(XENLOG_INFO "xentrace: memory allocation failed "
- "on cpu %d\n", cpu);
- goto out_dealloc;
- }
-
- per_cpu(t_bufs, cpu) = buf = rawbuf;
- buf->cons = buf->prod = 0;
- }
-
- offset = t_info_first_offset;
t_info_mfn_list = (uint32_t *)t_info;
for(i = 0; i < t_info_pages; i++)
@@ -219,27 +196,53 @@ static int alloc_trace_bufs(unsigned int
t_info->tbuf_size = pages;
/*
- * Now share the pages so xentrace can map them, and write them in
- * the global t_info structure.
+ * Allocate buffers for all of the cpus.
+ * If any fails, deallocate what you have so far and exit.
*/
for_each_online_cpu(cpu)
{
- void *rawbuf = per_cpu(t_bufs, cpu);
- struct page_info *p = virt_to_page(rawbuf);
- uint32_t mfn = virt_to_mfn(rawbuf);
+ offset = t_info_first_offset + (cpu * pages);
+ t_info->mfn_offset[cpu] = offset;
for ( i = 0; i < pages; i++ )
{
- share_xen_page_with_privileged_guests(p + i, XENSHARE_writable);
-
- t_info_mfn_list[offset + i]=mfn + i;
+ void *p = alloc_xenheap_pages(0, MEMF_bits(32 + PAGE_SHIFT));
+ if ( !p )
+ {
+ printk(XENLOG_INFO "xentrace: memory allocation failed "
+ "on cpu %d after %d pages\n", cpu, i);
+ t_info_mfn_list[offset + i] = 0;
+ goto out_dealloc;
+ }
+ t_info_mfn_list[offset + i] = virt_to_mfn(p);
}
- t_info->mfn_offset[cpu]=offset;
- printk(XENLOG_INFO "xentrace: p%d mfn %"PRIx32" offset %d\n",
- cpu, mfn, offset);
- offset+=i;
+ }
+
+ /*
+ * Initialize buffers for all of the cpus.
+ */
+ for_each_online_cpu(cpu)
+ {
+ struct t_buf *buf;
+ struct page_info *pg;
spin_lock_init(&per_cpu(t_lock, cpu));
+
+ offset = t_info->mfn_offset[cpu];
+
+ /* Initialize the buffer metadata */
+ per_cpu(t_bufs, cpu) = buf = mfn_to_virt(t_info_mfn_list[offset]);
+ buf->cons = buf->prod = 0;
+
+ printk(XENLOG_INFO "xentrace: p%d mfn %x offset %u\n",
+ cpu, t_info_mfn_list[offset], offset);
+
+ /* Now share the trace pages */
+ for ( i = 0; i < pages; i++ )
+ {
+ pg = mfn_to_page(t_info_mfn_list[offset + i]);
+ share_xen_page_with_privileged_guests(pg, XENSHARE_writable);
+ }
}
data_size = (pages * PAGE_SIZE - sizeof(struct t_buf));
@@ -255,14 +258,19 @@ static int alloc_trace_bufs(unsigned int
out_dealloc:
for_each_online_cpu(cpu)
{
- void *rawbuf = per_cpu(t_bufs, cpu);
- per_cpu(t_bufs, cpu) = NULL;
- if ( rawbuf )
+ offset = t_info->mfn_offset[cpu];
+ if ( !offset )
+ continue;
+ for ( i = 0; i < pages; i++ )
{
- ASSERT(!(virt_to_page(rawbuf)->count_info & PGC_allocated));
- free_xenheap_pages(rawbuf, order);
+ uint32_t mfn = t_info_mfn_list[offset + i];
+ if ( !mfn )
+ break;
+ ASSERT(!(mfn_to_page(mfn)->count_info & PGC_allocated));
+ free_xenheap_pages(mfn_to_virt(mfn), 0);
}
}
+out_dealloc_t_info:
free_xenheap_pages(t_info, get_order_from_pages(t_info_pages));
t_info = NULL;
printk(XENLOG_WARNING "xentrace: allocation failed! Tracing disabled.\n");
++++++ 23462-libxc-cpu-feature.patch ++++++
References: FATE#311951
# HG changeset patch
# User Keir Fraser
# Date 1307023264 -3600
# Node ID 4804af7048cafecfc014c30cfea374eb0a0360e8
# Parent 5839e797a1307fceffcd0b9ad35ed31644378b47
libxc: Simplify and clean up xc_cpufeature.h
* Remove Linux-private defns with no direct relation to CPUID
* Remove word offsets into Linux-defined cpu_caps array
* Hard tabs -> soft tabs
Signed-off-by: Keir Fraser
# HG changeset patch
# User Keir Fraser
# Date 1307118421 -3600
# Node ID bcd2476c2e2d00dc6371e52fbff66fe3178b7944
# Parent 55c5eff9bf84d4c5f3463c01f038edc1c46f30bc
libxc: Don't refer to meaningless 'word offsets' in xc_cpufeature.h
Signed-off-by: Keir Fraser
Index: xen-4.1.4-testing/tools/libxc/xc_cpufeature.h
===================================================================
--- xen-4.1.4-testing.orig/tools/libxc/xc_cpufeature.h
+++ xen-4.1.4-testing/tools/libxc/xc_cpufeature.h
@@ -17,136 +17,117 @@
#ifndef __LIBXC_CPUFEATURE_H
#define __LIBXC_CPUFEATURE_H
-/* Intel-defined CPU features, CPUID level 0x00000001 (edx), word 0 */
-#define X86_FEATURE_FPU (0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME (0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE (0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE (0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC (0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR (0*32+ 5) /* Model-Specific Registers, RDMSR, WRMSR */
-#define X86_FEATURE_PAE (0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE (0*32+ 7) /* Machine Check Architecture */
-#define X86_FEATURE_CX8 (0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC (0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP (0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR (0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE (0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA (0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV (0*32+15) /* CMOV instruction (FCMOVCC and FCOMI too if FPU present) */
-#define X86_FEATURE_PAT (0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 (0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN (0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLSH (0*32+19) /* Supports the CLFLUSH instruction */
-#define X86_FEATURE_DS (0*32+21) /* Debug Store */
-#define X86_FEATURE_ACPI (0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX (0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR (0*32+24) /* FXSAVE and FXRSTOR instructions (fast save and restore */
- /* of FPU context), and CR4.OSFXSR available */
-#define X86_FEATURE_XMM (0*32+25) /* Streaming SIMD Extensions */
-#define X86_FEATURE_XMM2 (0*32+26) /* Streaming SIMD Extensions-2 */
-#define X86_FEATURE_SELFSNOOP (0*32+27) /* CPU self snoop */
-#define X86_FEATURE_HT (0*32+28) /* Hyper-Threading */
-#define X86_FEATURE_ACC (0*32+29) /* Automatic clock control */
-#define X86_FEATURE_IA64 (0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE (0*32+31) /* Pending Break Enable */
+/* Intel-defined CPU features, CPUID level 0x00000001 (edx) */
+#define X86_FEATURE_FPU 0 /* Onboard FPU */
+#define X86_FEATURE_VME 1 /* Virtual Mode Extensions */
+#define X86_FEATURE_DE 2 /* Debugging Extensions */
+#define X86_FEATURE_PSE 3 /* Page Size Extensions */
+#define X86_FEATURE_TSC 4 /* Time Stamp Counter */
+#define X86_FEATURE_MSR 5 /* Model-Specific Registers, RDMSR, WRMSR */
+#define X86_FEATURE_PAE 6 /* Physical Address Extensions */
+#define X86_FEATURE_MCE 7 /* Machine Check Architecture */
+#define X86_FEATURE_CX8 8 /* CMPXCHG8 instruction */
+#define X86_FEATURE_APIC 9 /* Onboard APIC */
+#define X86_FEATURE_SEP 11 /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR 12 /* Memory Type Range Registers */
+#define X86_FEATURE_PGE 13 /* Page Global Enable */
+#define X86_FEATURE_MCA 14 /* Machine Check Architecture */
+#define X86_FEATURE_CMOV 15 /* CMOV instruction */
+#define X86_FEATURE_PAT 16 /* Page Attribute Table */
+#define X86_FEATURE_PSE36 17 /* 36-bit PSEs */
+#define X86_FEATURE_PN 18 /* Processor serial number */
+#define X86_FEATURE_CLFLSH 19 /* Supports the CLFLUSH instruction */
+#define X86_FEATURE_DS 21 /* Debug Store */
+#define X86_FEATURE_ACPI 22 /* ACPI via MSR */
+#define X86_FEATURE_MMX 23 /* Multimedia Extensions */
+#define X86_FEATURE_FXSR 24 /* FXSAVE and FXRSTOR instructions */
+#define X86_FEATURE_XMM 25 /* Streaming SIMD Extensions */
+#define X86_FEATURE_XMM2 26 /* Streaming SIMD Extensions-2 */
+#define X86_FEATURE_SELFSNOOP 27 /* CPU self snoop */
+#define X86_FEATURE_HT 28 /* Hyper-Threading */
+#define X86_FEATURE_ACC 29 /* Automatic clock control */
+#define X86_FEATURE_IA64 30 /* IA-64 processor */
+#define X86_FEATURE_PBE 31 /* Pending Break Enable */
-/* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
+/* AMD-defined CPU features, CPUID level 0x80000001 */
/* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL (1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP (1*32+19) /* MP Capable. */
-#define X86_FEATURE_NX (1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT (1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FFXSR (1*32+25) /* FFXSR instruction optimizations */
-#define X86_FEATURE_PAGE1GB (1*32+26) /* 1Gb large page support */
-#define X86_FEATURE_RDTSCP (1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM (1*32+29) /* Long Mode (x86-64) */
-#define X86_FEATURE_3DNOWEXT (1*32+30) /* AMD 3DNow! extensions */
-#define X86_FEATURE_3DNOW (1*32+31) /* 3DNow! */
-
-/* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY (2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN (2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI (2*32+ 3) /* LongRun table interface */
-
-/* Other features, Linux-defined mapping, word 3 */
-/* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX (3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR (3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR (3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR (3*32+ 3) /* Centaur MCRs (= MTRRs) */
-/* cpu types for specific tunings: */
-#define X86_FEATURE_K8 (3*32+ 4) /* Opteron, Athlon64 */
-#define X86_FEATURE_K7 (3*32+ 5) /* Athlon */
-#define X86_FEATURE_P3 (3*32+ 6) /* P3 */
-#define X86_FEATURE_P4 (3*32+ 7) /* P4 */
-#define X86_FEATURE_CONSTANT_TSC (3*32+ 8) /* TSC ticks at a constant rate */
-
-/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
-#define X86_FEATURE_XMM3 (4*32+ 0) /* Streaming SIMD Extensions-3 */
-#define X86_FEATURE_PCLMULQDQ (4*32+ 1) /* Carry-less multiplication */
-#define X86_FEATURE_DTES64 (4*32+ 2) /* 64-bit Debug Store */
-#define X86_FEATURE_MWAIT (4*32+ 3) /* Monitor/Mwait support */
-#define X86_FEATURE_DSCPL (4*32+ 4) /* CPL Qualified Debug Store */
-#define X86_FEATURE_VMXE (4*32+ 5) /* Virtual Machine Extensions */
-#define X86_FEATURE_SMXE (4*32+ 6) /* Safer Mode Extensions */
-#define X86_FEATURE_EST (4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 (4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 (4*32+ 9) /* Supplemental Streaming SIMD Extensions-3 */
-#define X86_FEATURE_CID (4*32+10) /* Context ID */
-#define X86_FEATURE_CX16 (4*32+13) /* CMPXCHG16B */
-#define X86_FEATURE_XTPR (4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM (4*32+15) /* Perf/Debug Capability MSR */
-#define X86_FEATURE_PCID (4*32+17) /* Process Context ID */
-#define X86_FEATURE_DCA (4*32+18) /* Direct Cache Access */
-#define X86_FEATURE_SSE4_1 (4*32+19) /* Streaming SIMD Extensions 4.1 */
-#define X86_FEATURE_SSE4_2 (4*32+20) /* Streaming SIMD Extensions 4.2 */
-#define X86_FEATURE_X2APIC (4*32+21) /* x2APIC */
-#define X86_FEATURE_POPCNT (4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE (4*32+24) /* "tdt" TSC Deadline Timer */
-#define X86_FEATURE_AES (4*32+25) /* AES acceleration instructions */
-#define X86_FEATURE_XSAVE (4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV */
-#define X86_FEATURE_AVX (4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C (4*32+29) /* Half-precision convert instruction */
-#define X86_FEATURE_RDRAND (4*32+30) /* Digital Random Number Generator */
-#define X86_FEATURE_HYPERVISOR (4*32+31) /* Running under some hypervisor */
-
-/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
-#define X86_FEATURE_XSTORE (5*32+ 2) /* on-CPU RNG present (xstore insn) */
-#define X86_FEATURE_XSTORE_EN (5*32+ 3) /* on-CPU RNG enabled */
-#define X86_FEATURE_XCRYPT (5*32+ 6) /* on-CPU crypto (xcrypt insn) */
-#define X86_FEATURE_XCRYPT_EN (5*32+ 7) /* on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 (5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN (5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE (5*32+ 10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN (5*32+ 11) /* PHE enabled */
-#define X86_FEATURE_PMM (5*32+ 12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN (5*32+ 13) /* PMM enabled */
-
-/* More extended AMD flags: CPUID level 0x80000001, ecx, word 6 */
-#define X86_FEATURE_LAHF_LM (6*32+ 0) /* LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY (6*32+ 1) /* If yes HyperThreading not valid */
-#define X86_FEATURE_SVM (6*32+ 2) /* Secure virtual machine */
-#define X86_FEATURE_EXTAPIC (6*32+ 3) /* Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY (6*32+ 4) /* CR8 in 32-bit mode */
-#define X86_FEATURE_ABM (6*32+ 5) /* Advanced bit manipulation */
-#define X86_FEATURE_SSE4A (6*32+ 6) /* SSE-4A */
-#define X86_FEATURE_MISALIGNSSE (6*32+ 7) /* Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH (6*32+ 8) /* 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW (6*32+ 9) /* OS Visible Workaround */
-#define X86_FEATURE_IBS (6*32+10) /* Instruction Based Sampling */
-#define X86_FEATURE_XOP (6*32+11) /* extended AVX instructions */
-#define X86_FEATURE_SKINIT (6*32+12) /* SKINIT/STGI instructions */
-#define X86_FEATURE_WDT (6*32+13) /* Watchdog timer */
-#define X86_FEATURE_LWP (6*32+15) /* Light Weight Profiling */
-#define X86_FEATURE_FMA4 (6*32+16) /* 4 operands MAC instructions */
-#define X86_FEATURE_NODEID_MSR (6*32+19) /* NodeId MSR */
-#define X86_FEATURE_TBM (6*32+21) /* trailing bit manipulations */
-#define X86_FEATURE_TOPOEXT (6*32+22) /* topology extensions CPUID leafs */
-
-/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
-#define X86_FEATURE_FSGSBASE (7*32+ 0) /* {RD,WR}{FS,GS}BASE instructions */
-#define X86_FEATURE_SMEP (7*32+ 7) /* Supervisor Mode Execution Protection */
-#define X86_FEATURE_ERMS (7*32+ 9) /* Enhanced REP MOVSB/STOSB */
-#define X86_FEATURE_INVPCID (7*32+10) /* Invalidate Process Context ID */
+#define X86_FEATURE_SYSCALL 11 /* SYSCALL/SYSRET */
+#define X86_FEATURE_MP 19 /* MP Capable. */
+#define X86_FEATURE_NX 20 /* Execute Disable */
+#define X86_FEATURE_MMXEXT 22 /* AMD MMX extensions */
+#define X86_FEATURE_FFXSR 25 /* FFXSR instruction optimizations */
+#define X86_FEATURE_PAGE1GB 26 /* 1Gb large page support */
+#define X86_FEATURE_RDTSCP 27 /* RDTSCP */
+#define X86_FEATURE_LM 29 /* Long Mode (x86-64) */
+#define X86_FEATURE_3DNOWEXT 30 /* AMD 3DNow! extensions */
+#define X86_FEATURE_3DNOW 31 /* 3DNow! */
+
+/* Intel-defined CPU features, CPUID level 0x00000001 (ecx) */
+#define X86_FEATURE_XMM3 0 /* Streaming SIMD Extensions-3 */
+#define X86_FEATURE_PCLMULQDQ 1 /* Carry-less multiplication */
+#define X86_FEATURE_DTES64 2 /* 64-bit Debug Store */
+#define X86_FEATURE_MWAIT 3 /* Monitor/Mwait support */
+#define X86_FEATURE_DSCPL 4 /* CPL Qualified Debug Store */
+#define X86_FEATURE_VMXE 5 /* Virtual Machine Extensions */
+#define X86_FEATURE_SMXE 6 /* Safer Mode Extensions */
+#define X86_FEATURE_EST 7 /* Enhanced SpeedStep */
+#define X86_FEATURE_TM2 8 /* Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 9 /* Supplemental Streaming SIMD Exts-3 */
+#define X86_FEATURE_CID 10 /* Context ID */
+#define X86_FEATURE_CX16 13 /* CMPXCHG16B */
+#define X86_FEATURE_XTPR 14 /* Send Task Priority Messages */
+#define X86_FEATURE_PDCM 15 /* Perf/Debug Capability MSR */
+#define X86_FEATURE_PCID 17 /* Process Context ID */
+#define X86_FEATURE_DCA 18 /* Direct Cache Access */
+#define X86_FEATURE_SSE4_1 19 /* Streaming SIMD Extensions 4.1 */
+#define X86_FEATURE_SSE4_2 20 /* Streaming SIMD Extensions 4.2 */
+#define X86_FEATURE_X2APIC 21 /* x2APIC */
+#define X86_FEATURE_POPCNT 23 /* POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE 24 /* "tdt" TSC Deadline Timer */
+#define X86_FEATURE_AES 25 /* AES acceleration instructions */
+#define X86_FEATURE_XSAVE 26 /* XSAVE/XRSTOR/XSETBV/XGETBV */
+#define X86_FEATURE_AVX 28 /* Advanced Vector Extensions */
+#define X86_FEATURE_F16C 29 /* Half-precision convert instruction */
+#define X86_FEATURE_RDRAND 30 /* Digital Random Number Generator */
+#define X86_FEATURE_HYPERVISOR 31 /* Running under some hypervisor */
+
+/* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001 */
+#define X86_FEATURE_XSTORE 2 /* on-CPU RNG present (xstore insn) */
+#define X86_FEATURE_XSTORE_EN 3 /* on-CPU RNG enabled */
+#define X86_FEATURE_XCRYPT 6 /* on-CPU crypto (xcrypt insn) */
+#define X86_FEATURE_XCRYPT_EN 7 /* on-CPU crypto enabled */
+#define X86_FEATURE_ACE2 8 /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN 9 /* ACE v2 enabled */
+#define X86_FEATURE_PHE 10 /* PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN 11 /* PHE enabled */
+#define X86_FEATURE_PMM 12 /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN 13 /* PMM enabled */
+
+/* More extended AMD flags: CPUID level 0x80000001, ecx */
+#define X86_FEATURE_LAHF_LM 0 /* LAHF/SAHF in long mode */
+#define X86_FEATURE_CMP_LEGACY 1 /* If yes HyperThreading not valid */
+#define X86_FEATURE_SVM 2 /* Secure virtual machine */
+#define X86_FEATURE_EXTAPIC 3 /* Extended APIC space */
+#define X86_FEATURE_CR8_LEGACY 4 /* CR8 in 32-bit mode */
+#define X86_FEATURE_ABM 5 /* Advanced bit manipulation */
+#define X86_FEATURE_SSE4A 6 /* SSE-4A */
+#define X86_FEATURE_MISALIGNSSE 7 /* Misaligned SSE mode */
+#define X86_FEATURE_3DNOWPREFETCH 8 /* 3DNow prefetch instructions */
+#define X86_FEATURE_OSVW 9 /* OS Visible Workaround */
+#define X86_FEATURE_IBS 10 /* Instruction Based Sampling */
+#define X86_FEATURE_XOP 11 /* extended AVX instructions */
+#define X86_FEATURE_SKINIT 12 /* SKINIT/STGI instructions */
+#define X86_FEATURE_WDT 13 /* Watchdog timer */
+#define X86_FEATURE_LWP 15 /* Light Weight Profiling */
+#define X86_FEATURE_FMA4 16 /* 4 operands MAC instructions */
+#define X86_FEATURE_NODEID_MSR 19 /* NodeId MSR */
+#define X86_FEATURE_TBM 21 /* trailing bit manipulations */
+#define X86_FEATURE_TOPOEXT 22 /* topology extensions CPUID leafs */
+
+/* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx) */
+#define X86_FEATURE_FSGSBASE 0 /* {RD,WR}{FS,GS}BASE instructions */
+#define X86_FEATURE_SMEP 7 /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_ERMS 9 /* Enhanced REP MOVSB/STOSB */
+#define X86_FEATURE_INVPCID 10 /* Invalidate Process Context ID */
#endif /* __LIBXC_CPUFEATURE_H */
Index: xen-4.1.4-testing/tools/libxc/xc_cpuid_x86.c
===================================================================
--- xen-4.1.4-testing.orig/tools/libxc/xc_cpuid_x86.c
+++ xen-4.1.4-testing/tools/libxc/xc_cpuid_x86.c
@@ -25,9 +25,9 @@
#include "xc_cpufeature.h"
#include
-#define bitmaskof(idx) (1u << ((idx) & 31))
-#define clear_bit(idx, dst) ((dst) &= ~(1u << ((idx) & 31)))
-#define set_bit(idx, dst) ((dst) |= (1u << ((idx) & 31)))
+#define bitmaskof(idx) (1u << (idx))
+#define clear_bit(idx, dst) ((dst) &= ~(1u << (idx)))
+#define set_bit(idx, dst) ((dst) |= (1u << (idx)))
#define DEF_MAX_BASE 0x0000000du
#define DEF_MAX_EXT 0x80000008u
++++++ 23506-x86_Disable_set_gpfn_from_mfn_until_m2p_table_is_allocated..patch ++++++
changeset: 23506:d1309a79bde8
user: Keir Fraser
date: Fri Jun 10 08:18:33 2011 +0100
files: xen/arch/x86/x86_64/mm.c xen/include/asm-x86/mm.h
description:
x86: Disable set_gpfn_from_mfn until m2p table is allocated.
This is a prerequisite for calling set_gpfn_from_mfn() unconditionally
from free_heap_pages().
Signed-off-by: Keir Fraser
---
xen/arch/x86/x86_64/mm.c | 4 ++++
xen/include/asm-x86/mm.h | 15 +++++++++++++--
2 files changed, 17 insertions(+), 2 deletions(-)
Index: xen-4.1.5-testing/xen/arch/x86/x86_64/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/x86_64/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/x86_64/mm.c
@@ -47,6 +47,8 @@ unsigned int __read_mostly pfn_pdx_hole_
unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
+bool_t __read_mostly machine_to_phys_mapping_valid = 0;
+
/* Top-level master (and idle-domain) page directory. */
l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
idle_pg_table[L4_PAGETABLE_ENTRIES];
@@ -800,6 +802,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
+ machine_to_phys_mapping_valid = 1;
+
/* Set up linear page table mapping. */
l4e_write(&idle_pg_table[l4_table_offset(LINEAR_PT_VIRT_START)],
l4e_from_paddr(__pa(idle_pg_table), __PAGE_HYPERVISOR));
Index: xen-4.1.5-testing/xen/include/asm-x86/mm.h
===================================================================
--- xen-4.1.5-testing.orig/xen/include/asm-x86/mm.h
+++ xen-4.1.5-testing/xen/include/asm-x86/mm.h
@@ -470,7 +470,7 @@ TYPE_SAFE(unsigned long,mfn);
#ifdef CONFIG_COMPAT
#define compat_machine_to_phys_mapping ((unsigned int *)RDWR_COMPAT_MPT_VIRT_START)
-#define set_gpfn_from_mfn(mfn, pfn) ({ \
+#define _set_gpfn_from_mfn(mfn, pfn) ({ \
struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
unsigned long entry = (d && (d == dom_cow)) ? \
SHARED_M2P_ENTRY : (pfn); \
@@ -479,7 +479,7 @@ TYPE_SAFE(unsigned long,mfn);
machine_to_phys_mapping[(mfn)] = (entry)); \
})
#else
-#define set_gpfn_from_mfn(mfn, pfn) ({ \
+#define _set_gpfn_from_mfn(mfn, pfn) ({ \
struct domain *d = page_get_owner(__mfn_to_page(mfn)); \
if(d && (d == dom_cow)) \
machine_to_phys_mapping[(mfn)] = SHARED_M2P_ENTRY; \
@@ -487,6 +487,17 @@ TYPE_SAFE(unsigned long,mfn);
machine_to_phys_mapping[(mfn)] = (pfn); \
})
#endif
+
+/*
+ * Disable some users of set_gpfn_from_mfn() (e.g., free_heap_pages()) until
+ * the machine_to_phys_mapping is actually set up.
+ */
+extern bool_t machine_to_phys_mapping_valid;
+#define set_gpfn_from_mfn(mfn, pfn) do { \
+ if ( machine_to_phys_mapping_valid ) \
+ _set_gpfn_from_mfn(mfn, pfn); \
+} while (0)
+
#define get_gpfn_from_mfn(mfn) (machine_to_phys_mapping[(mfn)])
#define mfn_to_gmfn(_d, mfn) \
++++++ 23507-xenpaging_update_machine_to_phys_mapping_during_page_deallocation.patch ++++++
changeset: 23507:0a29c8c3ddf7
user: Keir Fraser
date: Fri Jun 10 08:19:07 2011 +0100
files: xen/common/page_alloc.c
description:
xenpaging: update machine_to_phys_mapping[] during page deallocation
The machine_to_phys_mapping[] array needs updating during page
deallocation. If that page is allocated again, a call to
get_gpfn_from_mfn() will still return an old gfn from another guest.
This will cause trouble because this gfn number has no or different
meaning in the context of the current guest.
This happens when the entire guest ram is paged-out before
xen_vga_populate_vram() runs. Then XENMEM_populate_physmap is called
with gfn 0xff000. A new page is allocated with alloc_domheap_pages.
This new page does not have a gfn yet. However, in
guest_physmap_add_entry() the passed mfn maps still to an old gfn
(perhaps from another old guest). This old gfn is in paged-out state
in this guests context and has no mfn anymore. As a result, the
ASSERT() triggers because p2m_is_ram() is true for p2m_ram_paging*
types. If the machine_to_phys_mapping[] array is updated properly,
both loops in guest_physmap_add_entry() turn into no-ops for the new
page and the mfn/gfn mapping will be done at the end of the function.
If XENMEM_add_to_physmap is used with XENMAPSPACE_gmfn,
get_gpfn_from_mfn() will return an apparently valid gfn. As a
result, guest_physmap_remove_page() is called. The ASSERT in
p2m_remove_page triggers because the passed mfn does not match the old
mfn for the passed gfn.
Signed-off-by: Olaf Hering
---
xen/common/page_alloc.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
Index: xen-4.1.5-testing/xen/common/page_alloc.c
===================================================================
--- xen-4.1.5-testing.orig/xen/common/page_alloc.c
+++ xen-4.1.5-testing/xen/common/page_alloc.c
@@ -556,7 +556,7 @@ static int reserve_offlined_page(struct
static void free_heap_pages(
struct page_info *pg, unsigned int order)
{
- unsigned long mask;
+ unsigned long mask, mfn = page_to_mfn(pg);
unsigned int i, node = phys_to_nid(page_to_maddr(pg)), tainted = 0;
unsigned int zone = page_to_zone(pg);
@@ -567,6 +567,10 @@ static void free_heap_pages(
for ( i = 0; i < (1 << order); i++ )
{
+ /* This page is not a guest frame any more. */
+ page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
+ set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
+
/*
* Cannot assume that count_info == 0, as there are some corner cases
* where it isn't the case and yet it isn't a bug:
++++++ 23508-vmx-proc-based-ctls-probe.patch ++++++
# HG changeset patch
# User Keir Fraser
# Date 1307691167 -3600
# Node ID 2ef6bbee50371e1135236035ed1a9a7b8748e09f
# Parent 0a29c8c3ddf7395ea8e68c5f4cd8633023490022
x86/vmx: Small fixes to MSR_IA32_VMX_PROCBASED_CTLS feature probing.
Should check for VIRTUAL_INTR_PENDING as we unconditionally make use
of it. Also check for CR8 exiting unconditionally on x86/64, as this
is of use to nestedvmx, and every 64-bit cpu should support it.
Signed-off-by: Eddie Dong
Signed-off-by: Keir Fraser
Index: xen-4.1.3-testing/xen/arch/x86/hvm/vmx/vmcs.c
===================================================================
--- xen-4.1.3-testing.orig/xen/arch/x86/hvm/vmx/vmcs.c
+++ xen-4.1.3-testing/xen/arch/x86/hvm/vmx/vmcs.c
@@ -143,6 +143,11 @@ static int vmx_init_vmcs_config(void)
MSR_IA32_VMX_PINBASED_CTLS, &mismatch);
min = (CPU_BASED_HLT_EXITING |
+ CPU_BASED_VIRTUAL_INTR_PENDING |
+#ifdef __x86_64__
+ CPU_BASED_CR8_LOAD_EXITING |
+ CPU_BASED_CR8_STORE_EXITING |
+#endif
CPU_BASED_INVLPG_EXITING |
CPU_BASED_CR3_LOAD_EXITING |
CPU_BASED_CR3_STORE_EXITING |
@@ -161,13 +166,9 @@ static int vmx_init_vmcs_config(void)
MSR_IA32_VMX_PROCBASED_CTLS, &mismatch);
_vmx_cpu_based_exec_control &= ~CPU_BASED_RDTSC_EXITING;
#ifdef __x86_64__
- if ( !(_vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW) )
- {
- min |= CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING;
- _vmx_cpu_based_exec_control = adjust_vmx_controls(
- "CPU-Based Exec Control", min, opt,
- MSR_IA32_VMX_PROCBASED_CTLS, &mismatch);
- }
+ if ( _vmx_cpu_based_exec_control & CPU_BASED_TPR_SHADOW )
+ _vmx_cpu_based_exec_control &=
+ ~(CPU_BASED_CR8_LOAD_EXITING | CPU_BASED_CR8_STORE_EXITING);
#endif
if ( _vmx_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS )
++++++ 23509-x86_32_Fix_build_Define_machine_to_phys_mapping_valid.patch ++++++
changeset: 23509:782bc7b2661a
user: Keir Fraser
date: Fri Jun 10 13:51:39 2011 +0100
files: xen/arch/x86/x86_32/mm.c
description:
x86_32: Fix build: Define machine_to_phys_mapping_valid
Signed-off-by: Keir Fraser
---
xen/arch/x86/x86_32/mm.c | 4 ++++
1 file changed, 4 insertions(+)
Index: xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/x86_32/mm.c
+++ xen-4.1.2-testing/xen/arch/x86/x86_32/mm.c
@@ -39,6 +39,8 @@ extern l1_pgentry_t l1_identmap[L1_PAGET
unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
+bool_t __read_mostly machine_to_phys_mapping_valid = 0;
+
static unsigned long __read_mostly mpt_size;
void *alloc_xen_pagetable(void)
@@ -123,6 +125,8 @@ void __init paging_init(void)
#undef CNT
#undef MFN
+ machine_to_phys_mapping_valid = 1;
+
/* Create page tables for ioremap()/map_domain_page_global(). */
for ( i = 0; i < (IOREMAP_MBYTES >> (L2_PAGETABLE_SHIFT - 20)); i++ )
{
++++++ 23562-xenpaging_remove_unused_spinlock_in_pager.patch ++++++
changeset: 23562:8a7f52c59d64
user: Olaf Hering
date: Fri Jun 10 10:47:02 2011 +0200
files: tools/xenpaging/mem_event.h tools/xenpaging/spinlock.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h
description:
xenpaging: remove unused spinlock in pager
The spinlock code in the pager is a no-op because xenpaging is a single
threaded application. There is no locking when put_response() places a
response into the ringbuffer.
The only locking is inside the hypervisor, where mem_event_put_request() and
mem_event_get_response() lock the ringbuffer to protect multiple vcpus from
each other.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/mem_event.h | 5 ---
tools/xenpaging/spinlock.h | 69 --------------------------------------------
tools/xenpaging/xenpaging.c | 12 -------
tools/xenpaging/xenpaging.h | 1
4 files changed, 87 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/mem_event.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/mem_event.h
+++ xen-4.1.2-testing/tools/xenpaging/mem_event.h
@@ -25,7 +25,6 @@
#define __XEN_MEM_EVENT_H__
-#include "spinlock.h"
#include "xc.h"
#include
@@ -33,9 +32,6 @@
#include
-#define mem_event_ring_lock_init(_m) spin_lock_init(&(_m)->ring_lock)
-#define mem_event_ring_lock(_m) spin_lock(&(_m)->ring_lock)
-#define mem_event_ring_unlock(_m) spin_unlock(&(_m)->ring_lock)
typedef struct mem_event {
@@ -45,7 +41,6 @@ typedef struct mem_event {
mem_event_back_ring_t back_ring;
mem_event_shared_page_t *shared_page;
void *ring_page;
- spinlock_t ring_lock;
} mem_event_t;
Index: xen-4.1.2-testing/tools/xenpaging/spinlock.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/spinlock.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/******************************************************************************
- * tools/xenpaging/spinlock.h
- *
- * Spinlock implementation.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __SPINLOCK_H__
-#define __SPINLOCK_H__
-
-
-#include "bitops.h"
-
-
-#define SPIN_LOCK_UNLOCKED 0
-
-
-typedef int spinlock_t;
-
-
-static inline void spin_lock(spinlock_t *lock)
-{
- while ( test_and_set_bit(1, lock) );
-}
-
-static inline void spin_lock_init(spinlock_t *lock)
-{
- *lock = SPIN_LOCK_UNLOCKED;
-}
-
-static inline void spin_unlock(spinlock_t *lock)
-{
- *lock = SPIN_LOCK_UNLOCKED;
-}
-
-static inline int spin_trylock(spinlock_t *lock)
-{
- return !test_and_set_bit(1, lock);
-}
-
-
-#endif // __SPINLOCK_H__
-
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -32,7 +32,6 @@
#include
#include "bitops.h"
-#include "spinlock.h"
#include "file_ops.h"
#include "xc.h"
@@ -127,9 +126,6 @@ static xenpaging_t *xenpaging_init(domid
BACK_RING_INIT(&paging->mem_event.back_ring,
(mem_event_sring_t *)paging->mem_event.ring_page,
PAGE_SIZE);
-
- /* Initialise lock */
- mem_event_ring_lock_init(&paging->mem_event);
/* Initialise Xen */
rc = xc_mem_event_enable(xch, paging->mem_event.domain_id,
@@ -302,8 +298,6 @@ static int get_request(mem_event_t *mem_
mem_event_back_ring_t *back_ring;
RING_IDX req_cons;
- mem_event_ring_lock(mem_event);
-
back_ring = &mem_event->back_ring;
req_cons = back_ring->req_cons;
@@ -315,8 +309,6 @@ static int get_request(mem_event_t *mem_
back_ring->req_cons = req_cons;
back_ring->sring->req_event = req_cons + 1;
- mem_event_ring_unlock(mem_event);
-
return 0;
}
@@ -325,8 +317,6 @@ static int put_response(mem_event_t *mem
mem_event_back_ring_t *back_ring;
RING_IDX rsp_prod;
- mem_event_ring_lock(mem_event);
-
back_ring = &mem_event->back_ring;
rsp_prod = back_ring->rsp_prod_pvt;
@@ -338,8 +328,6 @@ static int put_response(mem_event_t *mem
back_ring->rsp_prod_pvt = rsp_prod;
RING_PUSH_RESPONSES(back_ring);
- mem_event_ring_unlock(mem_event);
-
return 0;
}
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h
@@ -25,7 +25,6 @@
#define __XEN_PAGING2_H__
-#include "spinlock.h"
#include "xc.h"
#include
++++++ 23571-vtd-fault-verbosity.patch ++++++
# HG changeset patch
# User Allen Kay
# Date 1308823884 -3600
# Node ID d3ac71f22e8621d9a7604f82f3976337e6c97a9a
# Parent 065ca14be963fe4da55d629ed0b3692a14253a86
[VTD] print out debug message in vt-d fault handler only when iommu=debug is set
Print out debug messages in the vtd_page_fault() handler only when
iommu=debug is set as a Xen boot parameter.
Signed-off-by: Allen Kay
Index: xen-4.1.5-testing/xen/drivers/passthrough/amd/iommu_acpi.c
===================================================================
--- xen-4.1.5-testing.orig/xen/drivers/passthrough/amd/iommu_acpi.c
+++ xen-4.1.5-testing/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -877,7 +877,7 @@ static int __init parse_ivrs_table(struc
BUG_ON(!table);
- if ( amd_iommu_debug )
+ if ( iommu_debug )
dump_acpi_table_header(table);
/* parse IVRS blocks */
Index: xen-4.1.5-testing/xen/drivers/passthrough/iommu.c
===================================================================
--- xen-4.1.5-testing.orig/xen/drivers/passthrough/iommu.c
+++ xen-4.1.5-testing/xen/drivers/passthrough/iommu.c
@@ -48,7 +48,7 @@ bool_t __read_mostly iommu_snoop = 1;
bool_t __read_mostly iommu_qinval = 1;
bool_t __read_mostly iommu_intremap = 1;
bool_t __read_mostly iommu_hap_pt_share;
-bool_t __read_mostly amd_iommu_debug;
+bool_t __read_mostly iommu_debug;
bool_t __read_mostly amd_iommu_perdev_intremap = 1;
static void __init parse_iommu_param(char *s)
@@ -74,8 +74,8 @@ static void __init parse_iommu_param(cha
iommu_qinval = 0;
else if ( !strcmp(s, "no-intremap") )
iommu_intremap = 0;
- else if ( !strcmp(s, "amd-iommu-debug") )
- amd_iommu_debug = 1;
+ else if ( !strcmp(s, "debug") )
+ iommu_debug = 1;
else if ( !strcmp(s, "amd-iommu-perdev-intremap") )
amd_iommu_perdev_intremap = 1;
else if ( !strcmp(s, "amd-iommu-global-intremap") )
Index: xen-4.1.5-testing/xen/drivers/passthrough/vtd/iommu.c
===================================================================
--- xen-4.1.5-testing.orig/xen/drivers/passthrough/vtd/iommu.c
+++ xen-4.1.5-testing/xen/drivers/passthrough/vtd/iommu.c
@@ -844,7 +844,7 @@ static int iommu_page_fault_do_one(struc
if ( fault_type == DMA_REMAP )
{
- dprintk(XENLOG_WARNING VTDPREFIX,
+ INTEL_IOMMU_DEBUG(
"DMAR:[%s] Request device [%02x:%02x.%d] "
"fault addr %"PRIx64", iommu reg = %p\n"
"DMAR:[fault reason %02xh] %s\n",
@@ -853,12 +853,13 @@ static int iommu_page_fault_do_one(struc
PCI_FUNC(source_id & 0xFF), addr, iommu->reg,
fault_reason, reason);
#ifndef __i386__ /* map_domain_page() cannot be used in this context */
- print_vtd_entries(iommu, (source_id >> 8),
+ if (iommu_debug)
+ print_vtd_entries(iommu, (source_id >> 8),
(source_id & 0xff), (addr >> PAGE_SHIFT));
#endif
}
else
- dprintk(XENLOG_WARNING VTDPREFIX,
+ INTEL_IOMMU_DEBUG(
"INTR-REMAP: Request device [%02x:%02x.%d] "
"fault index %"PRIx64", iommu reg = %p\n"
"INTR-REMAP:[fault reason %02xh] %s\n",
@@ -872,26 +873,19 @@ static int iommu_page_fault_do_one(struc
static void iommu_fault_status(u32 fault_status)
{
if ( fault_status & DMA_FSTS_PFO )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Fault Overflow\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Fault Overflow\n");
if ( fault_status & DMA_FSTS_PPF )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Primary Pending Fault\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Primary Pending Fault\n");
if ( fault_status & DMA_FSTS_AFO )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Advanced Fault Overflow\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Advanced Fault Overflow\n");
if ( fault_status & DMA_FSTS_APF )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Advanced Pending Fault\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Advanced Pending Fault\n");
if ( fault_status & DMA_FSTS_IQE )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Queue Error\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Queue Error\n");
if ( fault_status & DMA_FSTS_ICE )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Completion Error\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Completion Error\n");
if ( fault_status & DMA_FSTS_ITE )
- dprintk(XENLOG_ERR VTDPREFIX,
- "iommu_fault_status: Invalidation Time-out Error\n");
+ INTEL_IOMMU_DEBUG("iommu_fault_status: Invalidation Time-out Error\n");
}
#define PRIMARY_FAULT_REG_LEN (16)
Index: xen-4.1.5-testing/xen/drivers/passthrough/vtd/iommu.h
===================================================================
--- xen-4.1.5-testing.orig/xen/drivers/passthrough/vtd/iommu.h
+++ xen-4.1.5-testing/xen/drivers/passthrough/vtd/iommu.h
@@ -512,4 +512,11 @@ struct intel_iommu {
struct acpi_drhd_unit *drhd;
};
+#define INTEL_IOMMU_DEBUG(fmt, args...) \
+ do \
+ { \
+ if ( iommu_debug ) \
+ dprintk(XENLOG_WARNING VTDPREFIX, fmt, ## args); \
+ } while(0)
+
#endif
Index: xen-4.1.5-testing/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
===================================================================
--- xen-4.1.5-testing.orig/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
+++ xen-4.1.5-testing/xen/include/asm-x86/hvm/svm/amd-iommu-proto.h
@@ -34,7 +34,7 @@
#define AMD_IOMMU_DEBUG(fmt, args...) \
do \
{ \
- if ( amd_iommu_debug ) \
+ if ( iommu_debug ) \
printk(XENLOG_INFO "AMD-Vi: " fmt, ## args); \
} while(0)
Index: xen-4.1.5-testing/xen/include/xen/iommu.h
===================================================================
--- xen-4.1.5-testing.orig/xen/include/xen/iommu.h
+++ xen-4.1.5-testing/xen/include/xen/iommu.h
@@ -31,7 +31,7 @@ extern bool_t force_iommu, iommu_verbose
extern bool_t iommu_workaround_bios_bug, iommu_passthrough;
extern bool_t iommu_snoop, iommu_qinval, iommu_intremap;
extern bool_t iommu_hap_pt_share;
-extern bool_t amd_iommu_debug;
+extern bool_t iommu_debug;
extern bool_t amd_iommu_perdev_intremap;
/* Does this domain have a P2M table we can use as its IOMMU pagetable? */
++++++ 23574-x86-dom0-compressed-ELF.patch ++++++
References: fate#311376, fate#311529, bnc#578927, bnc#628554
# HG changeset patch
# User Jan Beulich
# Date 1308825237 -3600
# Node ID d7644abc218d3232b9d957ce94fc4b4bcc1f456e
# Parent 584c2e5e03d96f912cdfe90f8e9f910d5d661706
x86: allow Dom0 image to be compressed ELF
Rather than being able to decompress only the payloads of bzImage
containers, extend the logic to also decompress simple compressed ELF
images. At once, allow uncompressed bzImage payloads.
This is a prerequisite for native EFI booting support (where, in the
absence of a capable secondary boot loader, the image will always be
in compressed form).
Signed-off-by: Jan Beulich
Index: xen-4.1.2-testing/xen/arch/x86/bzimage.c
===================================================================
--- xen-4.1.2-testing.orig/xen/arch/x86/bzimage.c
+++ xen-4.1.2-testing/xen/arch/x86/bzimage.c
@@ -5,6 +5,7 @@
#include
#include
#include
+#include
#include
#define HEAPORDER 3
@@ -199,25 +200,36 @@ static __init int bzimage_check(struct s
return 1;
}
-int __init bzimage_headroom(char *image_start, unsigned long image_length)
+static unsigned long __initdata orig_image_len;
+
+unsigned long __init bzimage_headroom(char *image_start,
+ unsigned long image_length)
{
struct setup_header *hdr = (struct setup_header *)image_start;
- char *img;
- int err, headroom;
+ int err;
+ unsigned long headroom;
err = bzimage_check(hdr, image_length);
- if (err < 1)
+ if ( err < 0 )
return 0;
- img = image_start + (hdr->setup_sects+1) * 512;
- img += hdr->payload_offset;
+ if ( err > 0 )
+ {
+ image_start += (hdr->setup_sects + 1) * 512 + hdr->payload_offset;
+ image_length = hdr->payload_length;
+ }
+
+ if ( elf_is_elfbinary(image_start) )
+ return 0;
- headroom = output_length(img, hdr->payload_length);
- if (gzip_check(img, hdr->payload_length)) {
+ orig_image_len = image_length;
+ headroom = output_length(image_start, image_length);
+ if (gzip_check(image_start, image_length))
+ {
headroom += headroom >> 12; /* Add 8 bytes for every 32K input block */
headroom += (32768 + 18); /* Add 32K + 18 bytes of extra headroom */
} else
- headroom += hdr->payload_length;
+ headroom += image_length;
headroom = (headroom + 4095) & ~4095;
return headroom;
@@ -229,18 +241,24 @@ int __init bzimage_parse(char *image_bas
int err = bzimage_check(hdr, *image_len);
unsigned long output_len;
- if (err < 1)
+ if ( err < 0 )
return err;
+ if ( err > 0 )
+ {
+ *image_start += (hdr->setup_sects + 1) * 512 + hdr->payload_offset;
+ *image_len = hdr->payload_length;
+ }
+
+ if ( elf_is_elfbinary(*image_start) )
+ return 0;
+
BUG_ON(!(image_base < *image_start));
- *image_start += (hdr->setup_sects+1) * 512;
- *image_start += hdr->payload_offset;
- *image_len = hdr->payload_length;
- output_len = output_length(*image_start, *image_len);
+ output_len = output_length(*image_start, orig_image_len);
- if ( (err = perform_gunzip(image_base, *image_start, *image_len)) > 0 )
- err = decompress(*image_start, *image_len, image_base);
+ if ( (err = perform_gunzip(image_base, *image_start, orig_image_len)) > 0 )
+ err = decompress(*image_start, orig_image_len, image_base);
if ( !err )
{
Index: xen-4.1.2-testing/xen/include/asm-x86/bzimage.h
===================================================================
--- xen-4.1.2-testing.orig/xen/include/asm-x86/bzimage.h
+++ xen-4.1.2-testing/xen/include/asm-x86/bzimage.h
@@ -4,10 +4,9 @@
#include
#include
-int __init bzimage_headroom(char *image_start, unsigned long image_length);
+unsigned long bzimage_headroom(char *image_start, unsigned long image_length);
-int __init bzimage_parse(char *image_base,
- char **image_start,
- unsigned long *image_len);
+int bzimage_parse(char *image_base, char **image_start,
+ unsigned long *image_len);
#endif /* __X86_BZIMAGE_H__ */
++++++ 23575-x86-DMI.patch ++++++
References: fate#311376, fate#311529, bnc#578927, bnc#628554
# HG changeset patch
# User Jan Beulich
# Date 1308825280 -3600
# Node ID 4d9598a6a7777c50e109d7e2eb6d1cb28bcb4509
# Parent d7644abc218d3232b9d957ce94fc4b4bcc1f456e
x86/DMI: use proper structures instead of byte offsets
Besides being (in my eyes) desirable cleanup, this at once represents
another prerequisite for native EFI booting support.
Signed-off-by: Jan Beulich
Index: xen-4.1.3-testing/xen/arch/x86/dmi_scan.c
===================================================================
--- xen-4.1.3-testing.orig/xen/arch/x86/dmi_scan.c
+++ xen-4.1.3-testing/xen/arch/x86/dmi_scan.c
@@ -12,11 +12,31 @@
#include
#include
-#define bt_ioremap(b,l) ((u8 *)__acpi_map_table(b,l))
+#define bt_ioremap(b,l) ((void *)__acpi_map_table(b,l))
#define bt_iounmap(b,l) ((void)0)
#define memcpy_fromio memcpy
#define alloc_bootmem(l) xmalloc_bytes(l)
+struct dmi_eps {
+ char anchor[5]; /* "_DMI_" */
+ u8 checksum;
+ u16 size;
+ u32 address;
+ u16 num_structures;
+ u8 revision;
+} __attribute__((packed));
+
+struct smbios_eps {
+ char anchor[4]; /* "_SM_" */
+ u8 checksum;
+ u8 length;
+ u8 major, minor;
+ u16 max_size;
+ u8 revision;
+ u8 _rsrvd_[5];
+ struct dmi_eps dmi;
+} __attribute__((packed));
+
struct dmi_header
{
u8 type;
@@ -92,62 +112,70 @@ static int __init dmi_table(u32 base, in
}
-inline static int __init dmi_checksum(u8 *buf)
+static inline bool_t __init dmi_checksum(const void __iomem *buf,
+ unsigned int len)
{
- u8 sum=0;
- int a;
+ u8 sum = 0;
+ const u8 *p = buf;
+ unsigned int a;
- for(a=0; a<15; a++)
- sum+=buf[a];
- return (sum==0);
+ for (a = 0; a < len; a++)
+ sum += p[a];
+ return sum == 0;
}
int __init dmi_get_table(u32 *base, u32 *len)
{
- u8 buf[15];
+ struct dmi_eps eps;
char __iomem *p, *q;
p = maddr_to_virt(0xF0000);
for (q = p; q < p + 0x10000; q += 16) {
- memcpy_fromio(buf, q, 15);
- if (memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf)) {
- *base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
- *len=buf[7]<<8|buf[6];
+ memcpy_fromio(&eps, q, 15);
+ if (memcmp(eps.anchor, "_DMI_", 5) == 0 &&
+ dmi_checksum(&eps, sizeof(eps))) {
+ *base = eps.address;
+ *len = eps.size;
return 0;
}
}
return -1;
}
+static int __init _dmi_iterate(const struct dmi_eps *dmi,
+ const struct smbios_eps __iomem *smbios,
+ void (*decode)(struct dmi_header *))
+{
+ u16 num = dmi->num_structures;
+ u16 len = dmi->size;
+ u32 base = dmi->address;
+
+ /*
+ * DMI version 0.0 means that the real version is taken from
+ * the SMBIOS version, which we may not know at this point.
+ */
+ if (dmi->revision)
+ printk(KERN_INFO "DMI %d.%d present.\n",
+ dmi->revision >> 4, dmi->revision & 0x0f);
+ else if (!smbios)
+ printk(KERN_INFO "DMI present.\n");
+ dmi_printk((KERN_INFO "%d structures occupying %d bytes.\n",
+ num, len));
+ dmi_printk((KERN_INFO "DMI table at 0x%08X.\n", base));
+ return dmi_table(base, len, num, decode);
+}
+
static int __init dmi_iterate(void (*decode)(struct dmi_header *))
{
- u8 buf[15];
+ struct dmi_eps eps;
char __iomem *p, *q;
p = maddr_to_virt(0xF0000);
for (q = p; q < p + 0x10000; q += 16) {
- memcpy_fromio(buf, q, 15);
- if (memcmp(buf, "_DMI_", 5)==0 && dmi_checksum(buf)) {
- u16 num=buf[13]<<8|buf[12];
- u16 len=buf[7]<<8|buf[6];
- u32 base=buf[11]<<24|buf[10]<<16|buf[9]<<8|buf[8];
-
- /*
- * DMI version 0.0 means that the real version is taken from
- * the SMBIOS version, which we don't know at this point.
- */
- if(buf[14]!=0)
- printk(KERN_INFO "DMI %d.%d present.\n",
- buf[14]>>4, buf[14]&0x0F);
- else
- printk(KERN_INFO "DMI present.\n");
- dmi_printk((KERN_INFO "%d structures occupying %d bytes.\n",
- num, len));
- dmi_printk((KERN_INFO "DMI table at 0x%08X.\n",
- base));
- if(dmi_table(base,len, num, decode)==0)
- return 0;
- }
+ memcpy_fromio(&eps, q, sizeof(eps));
+ if (memcmp(eps.anchor, "_DMI_", 5) == 0 &&
+ dmi_checksum(&eps, sizeof(eps)))
+ return _dmi_iterate(&eps, NULL, decode);
}
return -1;
}
++++++ 23576-x86_show_page_walk_also_for_early_page_faults.patch ++++++
changeset: 23576:e2235fe267eb
user: Jan Beulich
date: Thu Jun 23 11:35:55 2011 +0100
files: xen/arch/x86/mm.c xen/arch/x86/traps.c xen/arch/x86/x86_32/mm.c xen/arch/x86/x86_32/traps.c xen/arch/x86/x86_64/mm.c xen/arch/x86/x86_64/traps.c
description:
x86: show page walk also for early page faults
At once, move the common (between 32- and 64-bit) definition of
machine_to_phys_mapping_valid to a common location.
Signed-off-by: Jan Beulich
---
xen/arch/x86/mm.c | 2 ++
xen/arch/x86/traps.c | 1 +
xen/arch/x86/x86_32/mm.c | 2 --
xen/arch/x86/x86_32/traps.c | 9 ++++++---
xen/arch/x86/x86_64/mm.c | 2 --
xen/arch/x86/x86_64/traps.c | 12 ++++++++----
6 files changed, 17 insertions(+), 11 deletions(-)
Index: xen-4.1.5-testing/xen/arch/x86/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/mm.c
@@ -153,6 +153,8 @@ unsigned long __read_mostly pdx_group_va
(FRAMETABLE_SIZE / sizeof(*frame_table) + PDX_GROUP_COUNT - 1)
/ PDX_GROUP_COUNT)] = { [0] = 1 };
+bool_t __read_mostly machine_to_phys_mapping_valid = 0;
+
#define PAGE_CACHE_ATTRS (_PAGE_PAT|_PAGE_PCD|_PAGE_PWT)
bool_t __read_mostly opt_allow_superpage;
Index: xen-4.1.5-testing/xen/arch/x86/traps.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/traps.c
+++ xen-4.1.5-testing/xen/arch/x86/traps.c
@@ -1455,6 +1455,7 @@ void __init do_early_page_fault(struct c
unsigned long *stk = (unsigned long *)regs;
printk("Early fatal page fault at %04x:%p (cr2=%p, ec=%04x)\n",
regs->cs, _p(regs->eip), _p(cr2), regs->error_code);
+ show_page_walk(cr2);
printk("Stack dump: ");
while ( ((long)stk & ((PAGE_SIZE - 1) & ~(BYTES_PER_LONG - 1))) != 0 )
printk("%p ", _p(*stk++));
Index: xen-4.1.5-testing/xen/arch/x86/x86_32/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/x86_32/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/x86_32/mm.c
@@ -39,8 +39,6 @@ extern l1_pgentry_t l1_identmap[L1_PAGET
unsigned int __read_mostly PAGE_HYPERVISOR = __PAGE_HYPERVISOR;
unsigned int __read_mostly PAGE_HYPERVISOR_NOCACHE = __PAGE_HYPERVISOR_NOCACHE;
-bool_t __read_mostly machine_to_phys_mapping_valid = 0;
-
static unsigned long __read_mostly mpt_size;
void *alloc_xen_pagetable(void)
Index: xen-4.1.5-testing/xen/arch/x86/x86_32/traps.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/x86_32/traps.c
+++ xen-4.1.5-testing/xen/arch/x86/x86_32/traps.c
@@ -164,7 +164,8 @@ void show_page_walk(unsigned long addr)
l3t += (cr3 & 0xFE0UL) >> 3;
l3e = l3t[l3_table_offset(addr)];
mfn = l3e_get_pfn(l3e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L3[0x%03lx] = %"PRIpte" %08lx\n",
l3_table_offset(addr), l3e_get_intpte(l3e), pfn);
unmap_domain_page(l3t);
@@ -175,7 +176,8 @@ void show_page_walk(unsigned long addr)
l2t = map_domain_page(mfn);
l2e = l2t[l2_table_offset(addr)];
mfn = l2e_get_pfn(l2e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L2[0x%03lx] = %"PRIpte" %08lx %s\n",
l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
(l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
@@ -188,7 +190,8 @@ void show_page_walk(unsigned long addr)
l1t = map_domain_page(mfn);
l1e = l1t[l1_table_offset(addr)];
mfn = l1e_get_pfn(l1e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L1[0x%03lx] = %"PRIpte" %08lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
unmap_domain_page(l1t);
Index: xen-4.1.5-testing/xen/arch/x86/x86_64/mm.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/x86_64/mm.c
+++ xen-4.1.5-testing/xen/arch/x86/x86_64/mm.c
@@ -47,8 +47,6 @@ unsigned int __read_mostly pfn_pdx_hole_
unsigned int __read_mostly m2p_compat_vstart = __HYPERVISOR_COMPAT_VIRT_START;
-bool_t __read_mostly machine_to_phys_mapping_valid = 0;
-
/* Top-level master (and idle-domain) page directory. */
l4_pgentry_t __attribute__ ((__section__ (".bss.page_aligned")))
idle_pg_table[L4_PAGETABLE_ENTRIES];
Index: xen-4.1.5-testing/xen/arch/x86/x86_64/traps.c
===================================================================
--- xen-4.1.5-testing.orig/xen/arch/x86/x86_64/traps.c
+++ xen-4.1.5-testing/xen/arch/x86/x86_64/traps.c
@@ -176,7 +176,8 @@ void show_page_walk(unsigned long addr)
l4t = mfn_to_virt(mfn);
l4e = l4t[l4_table_offset(addr)];
mfn = l4e_get_pfn(l4e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L4[0x%03lx] = %"PRIpte" %016lx\n",
l4_table_offset(addr), l4e_get_intpte(l4e), pfn);
if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) ||
@@ -186,7 +187,8 @@ void show_page_walk(unsigned long addr)
l3t = mfn_to_virt(mfn);
l3e = l3t[l3_table_offset(addr)];
mfn = l3e_get_pfn(l3e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L3[0x%03lx] = %"PRIpte" %016lx%s\n",
l3_table_offset(addr), l3e_get_intpte(l3e), pfn,
(l3e_get_flags(l3e) & _PAGE_PSE) ? " (PSE)" : "");
@@ -198,7 +200,8 @@ void show_page_walk(unsigned long addr)
l2t = mfn_to_virt(mfn);
l2e = l2t[l2_table_offset(addr)];
mfn = l2e_get_pfn(l2e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L2[0x%03lx] = %"PRIpte" %016lx %s\n",
l2_table_offset(addr), l2e_get_intpte(l2e), pfn,
(l2e_get_flags(l2e) & _PAGE_PSE) ? "(PSE)" : "");
@@ -210,7 +213,8 @@ void show_page_walk(unsigned long addr)
l1t = mfn_to_virt(mfn);
l1e = l1t[l1_table_offset(addr)];
mfn = l1e_get_pfn(l1e);
- pfn = mfn_valid(mfn) ? get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
+ pfn = mfn_valid(mfn) && machine_to_phys_mapping_valid ?
+ get_gpfn_from_mfn(mfn) : INVALID_M2P_ENTRY;
printk(" L1[0x%03lx] = %"PRIpte" %016lx\n",
l1_table_offset(addr), l1e_get_intpte(l1e), pfn);
}
++++++ 23577-tools_merge_several_bitop_functions_into_xc_bitops.h.patch ++++++
++++ 1023 lines (skipped)
++++++ 23578-xenpaging_add_xs_handle_to_struct_xenpaging.patch ++++++
changeset: 23578:7299a9a44b35
user: Olaf Hering
date: Wed Jun 22 14:47:09 2011 +0100
files: tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h
description:
xenpaging: add xs_handle to struct xenpaging
A xs_handle is currently used in the xc_mem_paging_flush_ioemu_cache()
function and will be used by a subsequent patch.
Add it to struct xenpaging.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
Acked-by: Ian Campbell
---
tools/xenpaging/xenpaging.c | 14 ++++++++++++++
tools/xenpaging/xenpaging.h | 1 +
2 files changed, 15 insertions(+)
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -28,6 +28,7 @@
#include
#include
#include
+#include
#include
@@ -92,6 +93,14 @@ static xenpaging_t *xenpaging_init(domid
paging = malloc(sizeof(xenpaging_t));
memset(paging, 0, sizeof(xenpaging_t));
+ /* Open connection to xenstore */
+ paging->xs_handle = xs_open(0);
+ if ( paging->xs_handle == NULL )
+ {
+ ERROR("Error initialising xenstore connection");
+ goto err;
+ }
+
p = getenv("XENPAGING_POLICY_MRU_SIZE");
if ( p && *p )
{
@@ -221,6 +230,8 @@ static xenpaging_t *xenpaging_init(domid
err:
if ( paging )
{
+ if ( paging->xs_handle )
+ xs_close(paging->xs_handle);
xc_interface_close(xch);
if ( paging->mem_event.shared_page )
{
@@ -277,6 +288,9 @@ static int xenpaging_teardown(xenpaging_
}
paging->mem_event.xce_handle = NULL;
+ /* Close connection to xenstore */
+ xs_close(paging->xs_handle);
+
/* Close connection to Xen */
rc = xc_interface_close(xch);
if ( rc != 0 )
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h
@@ -36,6 +36,7 @@
typedef struct xenpaging {
xc_interface *xc_handle;
+ struct xs_handle *xs_handle;
xc_platform_info_t *platform_info;
xc_domaininfo_t *domain_info;
++++++ 23579-xenpaging_drop_xc.c_remove_ASSERT.patch ++++++
changeset: 23579:868c8c898f73
user: Olaf Hering
date: Fri Jun 10 10:47:06 2011 +0200
files: tools/xenpaging/policy_default.c tools/xenpaging/xc.h
description:
xenpaging: drop xc.c, remove ASSERT
The ASSERT is not needed, victim is never NULL.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/policy_default.c | 1 -
tools/xenpaging/xc.h | 7 -------
2 files changed, 8 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/policy_default.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/policy_default.c
+++ xen-4.1.2-testing/tools/xenpaging/policy_default.c
@@ -78,7 +78,6 @@ int policy_choose_victim(xenpaging_t *pa
{
xc_interface *xch = paging->xc_handle;
unsigned long wrap = current_gfn;
- ASSERT(victim != NULL);
do
{
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ xen-4.1.2-testing/tools/xenpaging/xc.h
@@ -30,13 +30,6 @@
#include
-#if 1
-#define ASSERT(_p) \
- if ( !(_p) ) { DPRINTF("Assertion '%s' failed, line %d, file %s", #_p , \
- __LINE__, __FILE__); *(int*)0=0; }
-#else
-#define ASSERT(_p) ((void)0)
-#endif
++++++ 23580-xenpaging_drop_xc.c_remove_xc_platform_info_t.patch ++++++
changeset: 23580:771b6984aa2a
user: Olaf Hering
date: Fri Jun 10 10:47:07 2011 +0200
files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h
description:
xenpaging: drop xc.c, remove xc_platform_info_t
xc_platform_info_t is not used in xenpaging.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/xc.c | 10 ----------
tools/xenpaging/xc.h | 8 --------
tools/xenpaging/xenpaging.c | 17 -----------------
tools/xenpaging/xenpaging.h | 1 -
4 files changed, 36 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/xc.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c
+++ xen-4.1.2-testing/tools/xenpaging/xc.c
@@ -26,7 +26,6 @@
#include
#include
#include
-#include
#include
#include "xc.h"
@@ -97,15 +96,6 @@ int xc_wait_for_event(xc_interface *xch,
return xc_wait_for_event_or_timeout(xch, xce, -1);
}
-int xc_get_platform_info(xc_interface *xc_handle, domid_t domain_id,
- xc_platform_info_t *platform_info)
-{
- return get_platform_info(xc_handle, domain_id,
- &platform_info->max_mfn,
- &platform_info->hvirt_start,
- &platform_info->pt_levels,
- &platform_info->guest_width);
-}
/*
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ xen-4.1.2-testing/tools/xenpaging/xc.h
@@ -34,12 +34,6 @@
-typedef struct xc_platform_info {
- unsigned long max_mfn;
- unsigned long hvirt_start;
- unsigned int pt_levels;
- unsigned int guest_width;
-} xc_platform_info_t;
@@ -47,8 +41,6 @@ int xc_mem_paging_flush_ioemu_cache(domi
int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce);
int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms);
-int xc_get_platform_info(xc_interface *xc_handle, domid_t domain_id,
- xc_platform_info_t *platform_info);
#endif // __XC_H__
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -176,22 +176,6 @@ static xenpaging_t *xenpaging_init(domid
paging->mem_event.port = rc;
- /* Get platform info */
- paging->platform_info = malloc(sizeof(xc_platform_info_t));
- if ( paging->platform_info == NULL )
- {
- ERROR("Error allocating memory for platform info");
- goto err;
- }
-
- rc = xc_get_platform_info(xch, paging->mem_event.domain_id,
- paging->platform_info);
- if ( rc != 1 )
- {
- ERROR("Error getting platform info");
- goto err;
- }
-
/* Get domaininfo */
paging->domain_info = malloc(sizeof(xc_domaininfo_t));
if ( paging->domain_info == NULL )
@@ -246,7 +230,6 @@ static xenpaging_t *xenpaging_init(domid
}
free(paging->bitmap);
- free(paging->platform_info);
free(paging->domain_info);
free(paging);
}
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.h
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.h
@@ -38,7 +38,6 @@ typedef struct xenpaging {
xc_interface *xc_handle;
struct xs_handle *xs_handle;
- xc_platform_info_t *platform_info;
xc_domaininfo_t *domain_info;
unsigned long *bitmap;
++++++ 23581-xenpaging_drop_xc.c_remove_xc_wait_for_event.patch ++++++
changeset: 23581:9ce56626a5ab
user: Olaf Hering
date: Fri Jun 10 10:47:08 2011 +0200
files: tools/xenpaging/xc.c tools/xenpaging/xc.h
description:
xenpaging: drop xc.c, remove xc_wait_for_event
xc_wait_for_event is not used in xenpaging.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/xc.c | 4 ----
tools/xenpaging/xc.h | 1 -
2 files changed, 5 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/xc.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c
+++ xen-4.1.2-testing/tools/xenpaging/xc.c
@@ -91,10 +91,6 @@ int xc_wait_for_event_or_timeout(xc_inte
return -errno;
}
-int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce)
-{
- return xc_wait_for_event_or_timeout(xch, xce, -1);
-}
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ xen-4.1.2-testing/tools/xenpaging/xc.h
@@ -38,7 +38,6 @@
int xc_mem_paging_flush_ioemu_cache(domid_t domain_id);
-int xc_wait_for_event(xc_interface *xch, xc_evtchn *xce);
int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms);
++++++ 23582-xenpaging_drop_xc.c_move_xc_mem_paging_flush_ioemu_cache.patch ++++++
changeset: 23582:480e548fe76b
user: Olaf Hering
date: Fri Jun 10 10:47:10 2011 +0200
files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c
description:
xenpaging: drop xc.c, move xc_mem_paging_flush_ioemu_cache
Move xc_mem_paging_flush_ioemu_cache() into xenpaging and massage it a bit to
use the required members from xenpaging_t.
Also update type of rc to match xs_write() return value.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/xc.c | 18 ------------------
tools/xenpaging/xc.h | 1 -
tools/xenpaging/xenpaging.c | 16 +++++++++++++++-
3 files changed, 15 insertions(+), 20 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/xc.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c
+++ xen-4.1.2-testing/tools/xenpaging/xc.c
@@ -31,24 +31,6 @@
-int xc_mem_paging_flush_ioemu_cache(domid_t domain_id)
-{
- struct xs_handle *xsh = NULL;
- char path[80];
- int rc;
-
- sprintf(path, "/local/domain/0/device-model/%u/command", domain_id);
-
- xsh = xs_daemon_open();
- if ( xsh == NULL )
- return -EIO;
-
- rc = xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache"));
-
- xs_daemon_close(xsh);
-
- return rc ? 0 : -1;
-}
int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms)
{
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ xen-4.1.2-testing/tools/xenpaging/xc.h
@@ -37,7 +37,6 @@
-int xc_mem_paging_flush_ioemu_cache(domid_t domain_id);
int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms);
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -48,6 +48,20 @@ static void close_handler(int sig)
unlink(filename);
}
+static int xenpaging_mem_paging_flush_ioemu_cache(xenpaging_t *paging)
+{
+ struct xs_handle *xsh = paging->xs_handle;
+ domid_t domain_id = paging->mem_event.domain_id;
+ char path[80];
+ bool rc;
+
+ sprintf(path, "/local/domain/0/device-model/%u/command", domain_id);
+
+ rc = xs_write(xsh, XBT_NULL, path, "flush-cache", strlen("flush-cache"));
+
+ return rc == true ? 0 : -1;
+}
+
static void *init_page(void)
{
void *buffer;
@@ -484,7 +498,7 @@ static int evict_victim(xenpaging_t *pag
else
{
if ( j++ % 1000 == 0 )
- if ( xc_mem_paging_flush_ioemu_cache(paging->mem_event.domain_id) )
+ if ( xenpaging_mem_paging_flush_ioemu_cache(paging) )
ERROR("Error flushing ioemu cache");
}
}
++++++ 23583-xenpaging_drop_xc.c_move_xc_wait_for_event_or_timeout.patch ++++++
changeset: 23583:235d8fdcb3a9
user: Olaf Hering
date: Fri Jun 10 10:47:11 2011 +0200
files: tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c
description:
xenpaging: drop xc.c, move xc_wait_for_event_or_timeout
Move xc_wait_for_event_or_timeout() into xenpaging and massage it a bit for
further changes in subsequent patches.
Include poll.h instead of sys/poll.h.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/xc.c | 40 ------------------------------------
tools/xenpaging/xc.h | 1
tools/xenpaging/xenpaging.c | 48 +++++++++++++++++++++++++++++++++++++++++---
3 files changed, 45 insertions(+), 44 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/xc.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c
+++ xen-4.1.2-testing/tools/xenpaging/xc.c
@@ -32,46 +32,6 @@
-int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms)
-{
- struct pollfd fd = { .fd = xc_evtchn_fd(xce), .events = POLLIN | POLLERR };
- int port;
- int rc;
-
- rc = poll(&fd, 1, ms);
- if ( rc == -1 )
- {
- if (errno == EINTR)
- return 0;
-
- ERROR("Poll exited with an error");
- goto err;
- }
-
- if ( rc == 1 )
- {
- port = xc_evtchn_pending(xce);
- if ( port == -1 )
- {
- ERROR("Failed to read port from event channel");
- goto err;
- }
-
- rc = xc_evtchn_unmask(xce, port);
- if ( rc != 0 )
- {
- ERROR("Failed to unmask event channel port");
- goto err;
- }
- }
- else
- port = -1;
-
- return port;
-
- err:
- return -errno;
-}
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ xen-4.1.2-testing/tools/xenpaging/xc.h
@@ -37,7 +37,6 @@
-int xc_wait_for_event_or_timeout(xc_interface *xch, xc_evtchn *xce, unsigned long ms);
Index: xen-4.1.2-testing/tools/xenpaging/xenpaging.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xenpaging.c
+++ xen-4.1.2-testing/tools/xenpaging/xenpaging.c
@@ -27,6 +27,7 @@
#include
#include
#include
+#include
#include
#include
@@ -62,6 +63,47 @@ static int xenpaging_mem_paging_flush_io
return rc == true ? 0 : -1;
}
+static int xenpaging_wait_for_event_or_timeout(xenpaging_t *paging)
+{
+ xc_interface *xch = paging->xc_handle;
+ xc_evtchn *xce = paging->mem_event.xce_handle;
+ struct pollfd fd[1];
+ int port;
+ int rc;
+
+ fd[0].fd = xc_evtchn_fd(xce);
+ fd[0].events = POLLIN | POLLERR;
+ rc = poll(fd, 1, 100);
+ if ( rc < 0 )
+ {
+ if (errno == EINTR)
+ return 0;
+
+ ERROR("Poll exited with an error");
+ return -errno;
+ }
+
+ if ( rc && fd[0].revents & POLLIN )
+ {
+ DPRINTF("Got event from evtchn\n");
+ port = xc_evtchn_pending(xce);
+ if ( port == -1 )
+ {
+ ERROR("Failed to read port from event channel");
+ rc = -1;
+ goto err;
+ }
+
+ rc = xc_evtchn_unmask(xce, port);
+ if ( rc < 0 )
+ {
+ ERROR("Failed to unmask event channel port");
+ }
+ }
+err:
+ return rc;
+}
+
static void *init_page(void)
{
void *buffer;
@@ -598,13 +640,13 @@ int main(int argc, char *argv[])
while ( !interrupted )
{
/* Wait for Xen to signal that a page needs paged in */
- rc = xc_wait_for_event_or_timeout(xch, paging->mem_event.xce_handle, 100);
- if ( rc < -1 )
+ rc = xenpaging_wait_for_event_or_timeout(paging);
+ if ( rc < 0 )
{
ERROR("Error getting event");
goto out;
}
- else if ( rc != -1 )
+ else if ( rc != 0 )
{
DPRINTF("Got event from Xen\n");
}
++++++ 23584-xenpaging_drop_xc.c_remove_xc_files.patch ++++++
changeset: 23584:e30cff57b146
user: Olaf Hering
date: Fri Jun 10 10:47:12 2011 +0200
files: tools/xenpaging/Makefile tools/xenpaging/mem_event.h tools/xenpaging/xc.c tools/xenpaging/xc.h tools/xenpaging/xenpaging.c tools/xenpaging/xenpaging.h
description:
xenpaging: drop xc.c, remove xc files
Finally remove xc.c/xc.h and its references since both are empty now.
Signed-off-by: Olaf Hering
Committed-by: Ian Jackson
---
tools/xenpaging/Makefile | 2 -
tools/xenpaging/mem_event.h | 1
tools/xenpaging/xc.c | 47 --------------------------------------
tools/xenpaging/xc.h | 54 --------------------------------------------
tools/xenpaging/xenpaging.c | 1
tools/xenpaging/xenpaging.h | 1
6 files changed, 1 insertion(+), 105 deletions(-)
Index: xen-4.1.2-testing/tools/xenpaging/Makefile
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/Makefile
+++ xen-4.1.2-testing/tools/xenpaging/Makefile
@@ -9,7 +9,7 @@ LDLIBS += $(LDLIBS_libxenctrl) $(LDLIBS
POLICY = default
SRC :=
-SRCS += file_ops.c xc.c xenpaging.c policy_$(POLICY).c
+SRCS += file_ops.c xenpaging.c policy_$(POLICY).c
CFLAGS += -Werror
CFLAGS += -Wno-unused
Index: xen-4.1.2-testing/tools/xenpaging/mem_event.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/mem_event.h
+++ xen-4.1.2-testing/tools/xenpaging/mem_event.h
@@ -25,7 +25,6 @@
#define __XEN_MEM_EVENT_H__
-#include "xc.h"
#include
#include
Index: xen-4.1.2-testing/tools/xenpaging/xc.c
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.c
+++ /dev/null
@@ -1,47 +0,0 @@
-/******************************************************************************
- * tools/xenpaging/lib/xc.c
- *
- * libxc-type add-ons for paging support.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include "xc.h"
-
-
-
-
-
-
-
-
-/*
- * Local variables:
- * mode: C
- * c-set-style: "BSD"
- * c-basic-offset: 4
- * tab-width: 4
- * indent-tabs-mode: nil
- * End:
- */
Index: xen-4.1.2-testing/tools/xenpaging/xc.h
===================================================================
--- xen-4.1.2-testing.orig/tools/xenpaging/xc.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/******************************************************************************
- * tools/xenpaging/lib/xc.h
- *
- * libxc add-ons.
- *
- * Copyright (c) 2009 Citrix Systems, Inc. (Patrick Colp)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#ifndef __XC_H__
-#define __XC_H__
-
-
-#include