openSUSE Commits
Threads by month
- ----- 2025 -----
- January
- ----- 2024 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2023 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2022 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2021 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2020 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2019 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2018 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2017 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2016 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2015 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2014 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2013 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2012 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2011 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2010 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2009 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2008 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2007 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
- February
- January
- ----- 2006 -----
- December
- November
- October
- September
- August
- July
- June
- May
- April
- March
September 2022
- 1 participants
- 2392 discussions
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package gnome-epub-thumbnailer for openSUSE:Factory checked in at 2022-09-30 17:58:43
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/gnome-epub-thumbnailer (Old)
and /work/SRC/openSUSE:Factory/.gnome-epub-thumbnailer.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "gnome-epub-thumbnailer"
Fri Sep 30 17:58:43 2022 rev:8 rq:1007221 version:1.7
Changes:
--------
--- /work/SRC/openSUSE:Factory/gnome-epub-thumbnailer/gnome-epub-thumbnailer.changes 2019-10-31 22:18:35.711778338 +0100
+++ /work/SRC/openSUSE:Factory/.gnome-epub-thumbnailer.new.2275/gnome-epub-thumbnailer.changes 2022-09-30 17:59:03.273410233 +0200
@@ -1,0 +2,9 @@
+Fri Sep 30 09:03:53 UTC 2022 - Björn Lie <bjorn.lie(a)gmail.com>
+
+- Update to version 1.7:
+ + Fix thumbnailing of books with ePub 3.2 "cover-image".
+ + Work-around a broken ePub not getting a cover.
+ + Port build system to meson.
+- Add meson BuildRequires and macros following upstream's port.
+
+-------------------------------------------------------------------
Old:
----
gnome-epub-thumbnailer-1.6.tar.xz
New:
----
gnome-epub-thumbnailer-1.7.tar.xz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ gnome-epub-thumbnailer.spec ++++++
--- /var/tmp/diff_new_pack.BwWfkt/_old 2022-09-30 17:59:03.701411148 +0200
+++ /var/tmp/diff_new_pack.BwWfkt/_new 2022-09-30 17:59:03.705411156 +0200
@@ -1,7 +1,7 @@
#
# spec file for package gnome-epub-thumbnailer
#
-# Copyright (c) 2019 SUSE LINUX GmbH, Nuernberg, Germany.
+# Copyright (c) 2022 SUSE LLC
# Copyright (c) 2013 Dominique Leuenberger, Amsterdam, The Netherlands
#
# All modifications and additions to the file contributed by third parties
@@ -18,7 +18,7 @@
Name: gnome-epub-thumbnailer
-Version: 1.6
+Version: 1.7
Release: 0
Summary: Thumbnailer for EPub books
License: GPL-2.0-or-later
@@ -26,6 +26,7 @@
URL: https://git.gnome.org/browse/gnome-epub-thumbnailer
Source0: https://download.gnome.org/sources/gnome-epub-thumbnailer/%{version}/%{name…
+BuildRequires: meson
BuildRequires: pkgconfig
BuildRequires: pkgconfig(gdk-pixbuf-2.0)
BuildRequires: pkgconfig(gio-2.0)
@@ -39,15 +40,15 @@
%autosetup
%build
-%configure
-make %{?_smp_mflags}
+%meson
+%meson_build
%install
-%make_install
+%meson_install
%files
%license COPYING
-%doc ChangeLog README
+%doc NEWS README
%{_bindir}/gnome-epub-thumbnailer
%{_bindir}/gnome-mobi-thumbnailer
%dir %{_datadir}/thumbnailers/
++++++ gnome-epub-thumbnailer-1.6.tar.xz -> gnome-epub-thumbnailer-1.7.tar.xz ++++++
++++ 11719 lines of diff (skipped)
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package openmpi3 for openSUSE:Factory checked in at 2022-09-30 17:58:38
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/openmpi3 (Old)
and /work/SRC/openSUSE:Factory/.openmpi3.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "openmpi3"
Fri Sep 30 17:58:38 2022 rev:29 rq:1007181 version:3.1.6
Changes:
--------
--- /work/SRC/openSUSE:Factory/openmpi3/openmpi3.changes 2022-06-13 13:03:41.057211721 +0200
+++ /work/SRC/openSUSE:Factory/.openmpi3.new.2275/openmpi3.changes 2022-09-30 17:59:01.397406222 +0200
@@ -1,0 +2,17 @@
+Wed Jul 6 12:29:12 UTC 2022 - Nicolas Morey-Chaisemartin <nmoreychaisemartin(a)suse.com>
+
+- Enable libfabric on all arch
+- Switch to external libevent for all flavors
+- Switch to external hwloc and PMIx for HPC builds
+- Add patches to fix compilation against hwloc v2.x
+ - Revert-hwloc-bring-back-the-configure-error-when-external-hwloc-is-2.0.patch
+ - hwloc-fix-hwloc-shmem.h-in-the-external-case.patch
+ - hwloc-updates-for-hwloc-2.0.x-API.patch
+- Backport upstream patches for external PMIx support
+ - pmix-pmix2x-Fix-the-PMIx-discovery-logic.patch
+ - pmix-Fix-detection-of-Externally-built-PMIx.patch
+- Update rpmlintrc file to ignore missing libname suffix in libopenmpi packages
+- Add patch btl-openib-Add-support-for-newer-hardware.patch to support
+ newer RDMA hardware (ConnectX 4 and 5 VF, ConnectX6 PF + VF and Broadcom adapters)
+
+-------------------------------------------------------------------
New:
----
Revert-hwloc-bring-back-the-configure-error-when-external-hwloc-is-2.0.patch
btl-openib-Add-support-for-newer-hardware.patch
hwloc-fix-hwloc-shmem.h-in-the-external-case.patch
hwloc-updates-for-hwloc-2.0.x-API.patch
pmix-Fix-detection-of-Externally-built-PMIx.patch
pmix-pmix2x-Fix-the-PMIx-discovery-logic.patch
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ openmpi3.spec ++++++
--- /var/tmp/diff_new_pack.JS8vQa/_old 2022-09-30 17:59:02.089407702 +0200
+++ /var/tmp/diff_new_pack.JS8vQa/_new 2022-09-30 17:59:02.093407711 +0200
@@ -146,6 +146,11 @@
%global hpc_openmpi_dep_version %(VER=%m_f_ver; echo -n ${VER})
%global hpc_openmpi_dir openmpi%{hpc_openmpi_dep_version}
%global hpc_openmpi_pack_version %{hpc_openmpi_dep_version}
+%{bcond_without pmix}
+%{bcond_without hwloc}
+%else
+%{bcond_with pmix}
+%{bcond_with hwloc}
%endif
%define git_ver .0.ea348728b4c8
@@ -174,12 +179,20 @@
Patch3: fix-rdma-component-selection.patch
Patch4: Fix-error-with-stricter-quoting-requirements-of-autoconf-2.70.patch
Patch5: Always-include-the-stddef.h-header.patch
+Patch6: Revert-hwloc-bring-back-the-configure-error-when-external-hwloc-is-2.0.patch
+Patch7: hwloc-fix-hwloc-shmem.h-in-the-external-case.patch
+Patch8: hwloc-updates-for-hwloc-2.0.x-API.patch
+Patch9: pmix-pmix2x-Fix-the-PMIx-discovery-logic.patch
+Patch10: pmix-Fix-detection-of-Externally-built-PMIx.patch
+Patch11: btl-openib-Add-support-for-newer-hardware.patch
Provides: mpi
BuildRoot: %{_tmppath}/%{name}-%{version}-build
BuildRequires: autoconf
BuildRequires: automake
BuildRequires: fdupes
BuildRequires: flex
+BuildRequires: libevent-devel
+BuildRequires: libfabric-devel
BuildRequires: libibumad-devel
BuildRequires: libibverbs-devel
BuildRequires: libtool
@@ -195,6 +208,12 @@
BuildRequires: libucs-devel
BuildRequires: libuct-devel
%endif
+%if %{with hwloc}
+BuildRequires: hwloc-devel
+%endif
+%if %{with pmix}
+BuildRequires: pmix-devel
+%endif
%if %{without hpc}
BuildRequires: Modules
BuildRequires: gcc-c++
@@ -215,7 +234,6 @@
%ifarch %{ix86} x86_64
BuildRequires: infinipath-psm-devel
-BuildRequires: libfabric-devel
%endif
%ifarch x86_64
@@ -393,9 +411,11 @@
Group: Development/Libraries/Parallel
Provides: openmpi-runtime-config = %{version}
Conflicts: otherproviders(openmpi-runtime-config)
+%if %{without pmix}
# OpenMPI3 is PMIx enabled
Provides: pmix-runtime-config = %{version}
Conflicts: otherproviders(pmix-runtime-config)
+%endif
%description -n %{pname}%{m_f_ver}-config
OpenMPI is an implementation of the Message Passing Interface, a
@@ -442,6 +462,13 @@
%patch3
%patch4
%patch5
+%patch6
+%patch7
+%patch8
+%patch9
+%patch10
+%patch11
+
# Live patch the VERSION file
sed -i -e 's/^greek=.*$/greek=%{git_ver}/' -e 's/^repo_rev=.*$/repo_rev=%{version}%{git_ver}/' \
-e 's/^date=.*$/date="OpenMPI %{version} Distribution for SUSE"/' VERSION
@@ -476,9 +503,17 @@
--enable-builtin-atomics \
--with-libltdl=%{_prefix} \
--with-verbs \
+ --with-libfabric \
--enable-mpi-thread-multiple \
--disable-wrapper-rpath \
--with-slurm \
+ --with-libevent=external \
+%if %{with hwloc}
+ --with-hwloc=external \
+%endif
+%if %{with pmix}
+ --with-pmix=external \
+%endif
%if 0%{?with_ucx}
--with-ucx \
--with-ucx-libdir=/usr/%_lib \
@@ -628,8 +663,10 @@
rm -f %{buildroot}%{_sysconfdir}/openmpi-default-hostfile
rm -f %{buildroot}%{_sysconfdir}/openmpi-mca-params.conf
rm -f %{buildroot}%{_sysconfdir}/openmpi-totalview.tcl
+%if %{without pmix}
rm -f %{buildroot}%{_sysconfdir}/pmix-mca-params.conf
%endif
+%endif
%if %{without hpc}
%post
@@ -714,8 +751,11 @@
%{mpi_datadir}/openmpi/mca-btl-openib-device-params.ini
%{mpi_datadir}/openmpi/*-data.txt
%{mpi_datadir}/openmpi/help-*.txt
+%if %{without pmix}
%dir %{mpi_datadir}/pmix
%{mpi_datadir}/pmix/help-*.txt
+%{mpi_datadir}/pmix/help-*.txt
+%endif
%files %{!?with_hpc:libs}%{?with_hpc:-n lib%{name}}
%defattr(-,root,root)
@@ -723,7 +763,7 @@
%dir %mpi_libdir/
%mpi_libdir/*.so.*
%{mpi_libdir}/openmpi/*.so
-%if 0%{!?build_static_devel:1}
+%if %{without pmix}
%dir %mpi_libdir/pmix/
%{mpi_libdir}/pmix/*.so
%endif
@@ -761,7 +801,9 @@
%{mpi_bindir}/shmemcxx
%{mpi_bindir}/shmemfort
%{mpi_datadir}/openmpi/openmpi-valgrind.supp
+%if %{without pmix}
%{mpi_datadir}/pmix/pmix-valgrind.supp
+%endif
%files docs
%defattr(-, root, root, -)
@@ -788,7 +830,9 @@
%files -n %{pname}%{m_f_ver}-config
%config %{_sysconfdir}/openmpi-default-hostfile
%config %{_sysconfdir}/openmpi-mca-params.conf
+%if %{without pmix}
%config %{_sysconfdir}/pmix-mca-params.conf
+%endif
%{_sysconfdir}/openmpi-totalview.tcl
%endif
++++++ Revert-hwloc-bring-back-the-configure-error-when-external-hwloc-is-2.0.patch ++++++
commit b80cf0f5de7720f71189f594125f986193c12f81
Author: Nicolas Morey-Chaisemartin <nmoreychaisemartin(a)suse.com>
Date: Tue Jul 12 09:36:25 2022 +0200
Revert "hwloc: bring back the configure error when external hwloc is >= 2.0"
This reverts commit 9eb37be46c75662eb4f6c945144121e3e55360be.
diff --git opal/mca/hwloc/external/configure.m4 opal/mca/hwloc/external/configure.m4
index d951c9a92f1b..411d8ad1c1f2 100644
--- opal/mca/hwloc/external/configure.m4
+++ opal/mca/hwloc/external/configure.m4
@@ -184,21 +184,7 @@ AC_DEFUN([MCA_opal_hwloc_external_CONFIG],[
[AC_MSG_RESULT([yes])],
[AC_MSG_RESULT([no])
AC_MSG_ERROR([Cannot continue])])
- AC_MSG_CHECKING([if external hwloc version is lower than 2.0])
- AS_IF([test "$opal_hwloc_dir" != ""],
- [opal_hwloc_external_CFLAGS_save=$CFLAGS
- CFLAGS="-I$opal_hwloc_dir/include $opal_hwloc_external_CFLAGS_save"])
- AC_COMPILE_IFELSE(
- [AC_LANG_PROGRAM([[#include <hwloc.h>]],
- [[
-#if HWLOC_API_VERSION >= 0x00020000
-#error "hwloc API version is greater or equal than 0x00020000"
-#endif
- ]])],
- [AC_MSG_RESULT([yes])],
- [AC_MSG_RESULT([no])
- AC_MSG_ERROR([OMPI does not currently support hwloc v2 API
-Cannot continue])])
+
AS_IF([test "$opal_hwloc_dir" != ""],
[CFLAGS=$opal_hwloc_external_CFLAGS_save])
++++++ btl-openib-Add-support-for-newer-hardware.patch ++++++
commit 64434b46183221882cb767dc29b25cc90576b9ef
Author: Nicolas Morey-Chaisemartin <nmoreychaisemartin(a)suse.com>
Date: Tue Sep 13 15:48:59 2022 +0200
btl: openib: Add support for newer hardware
- ConnectX4 VF
- ConnectX5 VF
- ConnectX6 VF
- New broadcom PCI Ids
Signed-off-by: Nicolas Morey-Chaisemartin <nmoreychaisemartin(a)suse.com>
fix
diff --git opal/mca/btl/openib/mca-btl-openib-device-params.ini opal/mca/btl/openib/mca-btl-openib-device-params.ini
index b51a7dcdde78..b9949de235b8 100644
--- opal/mca/btl/openib/mca-btl-openib-device-params.ini
+++ opal/mca/btl/openib/mca-btl-openib-device-params.ini
@@ -174,7 +174,7 @@ max_inline_data = 256
[Mellanox ConnectX4]
vendor_id = 0x2c9,0x5ad,0x66a,0x8f1,0x1708,0x03ba,0x15b3,0x119f
-vendor_part_id = 4115,4117
+vendor_part_id = 4115,4116,4117,4118
use_eager_rdma = 1
mtu = 4096
max_inline_data = 256
@@ -183,7 +183,7 @@ max_inline_data = 256
[Mellanox ConnectX5]
vendor_id = 0x2c9,0x5ad,0x66a,0x8f1,0x1708,0x03ba,0x15b3,0x119f
-vendor_part_id = 4119,4121
+vendor_part_id = 4119,4120,4121,4122
use_eager_rdma = 1
mtu = 4096
max_inline_data = 256
@@ -192,7 +192,7 @@ max_inline_data = 256
[Mellanox ConnectX6]
vendor_id = 0x2c9,0x5ad,0x66a,0x8f1,0x1708,0x03ba,0x15b3,0x119f
-vendor_part_id = 4123
+vendor_part_id = 4123,4124
use_eager_rdma = 1
mtu = 4096
max_inline_data = 256
@@ -345,7 +345,7 @@ max_inline_data = 72
[Broadcom BCM57XXX]
vendor_id = 0x14e4
-vendor_part_id = 0x1605,0x1606,0x1614,0x16c0,0x16c1,0x16ce,0x16cf,0x16d6,0x16d7,0x16d8,0x16d9,0x16df,0x16e2,0x16e3,0x16e5,0x16eb,0x16ed,0x16ef,0x16f0,0x16f1,0x1750
+vendor_part_id = 0x1605,0x1606,0x1614,0x16c0,0x16c1,0x16ce,0x16cf,0x16d6,0x16d7,0x16d8,0x16d9,0x16df,0x16e2,0x16e3,0x16e5,0x16eb,0x16ed,0x16ef,0x16f0,0x16f1
use_eager_rdma = 1
mtu = 1024
receive_queues = P,65536,256,192,128
++++++ hwloc-fix-hwloc-shmem.h-in-the-external-case.patch ++++++
commit 6a2d129fd28ca90c5c3f857eeb5ff933abc33470
Author: Brice Goglin <Brice.Goglin(a)inria.fr>
Date: Fri Jan 26 15:44:15 2018 +0100
hwloc: fix hwloc/shmem.h in the external case
Signed-off-by: Brice Goglin <Brice.Goglin(a)inria.fr>
diff --git opal/mca/hwloc/base/hwloc_base_util.c opal/mca/hwloc/base/hwloc_base_util.c
index f8ca72c182d1..dd483a7dc1be 100644
--- opal/mca/hwloc/base/hwloc_base_util.c
+++ opal/mca/hwloc/base/hwloc_base_util.c
@@ -27,6 +27,7 @@
* $HEADER$
*/
+#define OPAL_HWLOC_WANT_SHMEM 1
#include "opal_config.h"
diff --git opal/mca/hwloc/external/configure.m4 opal/mca/hwloc/external/configure.m4
index 411d8ad1c1f2..b8214099ef84 100644
--- opal/mca/hwloc/external/configure.m4
+++ opal/mca/hwloc/external/configure.m4
@@ -63,12 +63,17 @@ AC_DEFUN([MCA_opal_hwloc_external_POST_CONFIG],[
# the MCA_hwloc_external_openfabrics_helper define).
AS_IF([test "$opal_hwloc_dir" != ""],
[opal_hwloc_include="$opal_hwloc_dir/include/hwloc.h"
+ opal_hwloc_shmem_include="$opal_hwloc_dir/include/hwloc/shmem.h",
opal_hwloc_openfabrics_include="$opal_hwloc_dir/include/hwloc/openfabrics-verbs.h"],
[opal_hwloc_include="hwloc.h"
+ opal_hwloc_shmem_include="hwloc/shmem.h"
opal_hwloc_openfabrics_include="hwloc/openfabrics-verbs.h"])
AC_DEFINE_UNQUOTED(MCA_hwloc_external_header,
["$opal_hwloc_include"],
[Location of external hwloc header])
+ AC_DEFINE_UNQUOTED(MCA_hwloc_external_shmem_header,
+ ["$opal_hwloc_shmem_include"],
+ [Location of external hwloc shmem header])
AC_DEFINE_UNQUOTED(MCA_hwloc_external_openfabrics_header,
["$opal_hwloc_openfabrics_include"],
[Location of external hwloc OpenFabrics header])
diff --git opal/mca/hwloc/external/external.h opal/mca/hwloc/external/external.h
index 6558a0bcbd14..0e1a91fcf470 100644
--- opal/mca/hwloc/external/external.h
+++ opal/mca/hwloc/external/external.h
@@ -43,6 +43,14 @@ BEGIN_C_DECLS
# endif
#endif
+#if defined(OPAL_HWLOC_WANT_SHMEM) && OPAL_HWLOC_WANT_SHMEM
+# if HWLOC_API_VERSION >= 0x20000
+# include MCA_hwloc_external_shmem_header
+# else
+# error Tried to include hwloc shmem header, but hwloc < 2.0 found
+# endif
+#endif
+
#if HWLOC_API_VERSION < 0x00010b00
#define HWLOC_OBJ_NUMANODE HWLOC_OBJ_NODE
#define HWLOC_OBJ_PACKAGE HWLOC_OBJ_SOCKET
diff --git orte/mca/rtc/hwloc/rtc_hwloc.c orte/mca/rtc/hwloc/rtc_hwloc.c
index b832d4cf6bc9..8acdf572b577 100644
--- orte/mca/rtc/hwloc/rtc_hwloc.c
+++ orte/mca/rtc/hwloc/rtc_hwloc.c
@@ -9,6 +9,8 @@
* $HEADER$
*/
+#define OPAL_HWLOC_WANT_SHMEM 1
+
#include "orte_config.h"
#include "orte/constants.h"
#include "orte/types.h"
++++++ hwloc-updates-for-hwloc-2.0.x-API.patch ++++++
commit 287d7866d3bd70c1a8d412ffb8e6b12cd7e903d5
Author: Jeff Squyres <jsquyres(a)cisco.com>
Date: Thu Jan 4 09:58:50 2018 -0800
hwloc: updates for hwloc 2.0.x API
Signed-off-by: Jeff Squyres <jsquyres(a)cisco.com>
diff --git opal/mca/btl/openib/btl_openib_component.c opal/mca/btl/openib/btl_openib_component.c
index c482dcdbcda7..facf338a5373 100644
--- opal/mca/btl/openib/btl_openib_component.c
+++ opal/mca/btl/openib/btl_openib_component.c
@@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
- * Copyright (c) 2006-2017 Cisco Systems, Inc. All rights reserved
+ * Copyright (c) 2006-2018 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2006-2015 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006-2015 Los Alamos National Security, LLC. All rights
* reserved.
@@ -1524,7 +1524,11 @@ static uint64_t calculate_total_mem (void)
if (NULL == machine) {
return 0;
}
+#if HWLOC_API_VERSION < 0x20000
return machine->memory.total_memory;
+#else
+ return machine->total_memory;
+#endif
}
/* couldn't find it */
diff --git opal/mca/hwloc/base/hwloc_base_util.c opal/mca/hwloc/base/hwloc_base_util.c
index dd483a7dc1be..61d8b779888c 100644
--- opal/mca/hwloc/base/hwloc_base_util.c
+++ opal/mca/hwloc/base/hwloc_base_util.c
@@ -10,7 +10,7 @@
* University of Stuttgart. All rights reserved.
* Copyright (c) 2004-2005 The Regents of the University of California.
* All rights reserved.
- * Copyright (c) 2011-2017 Cisco Systems, Inc. All rights reserved
+ * Copyright (c) 2011-2018 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2012-2017 Los Alamos National Security, LLC.
* All rights reserved.
* Copyright (c) 2013-2018 Intel, Inc. All rights reserved.
@@ -59,6 +59,11 @@
#include "opal/mca/hwloc/hwloc-internal.h"
#include "opal/mca/hwloc/base/base.h"
+#if HWLOC_API_VERSION >= 0x20000
+// JMS Is this right?
+#include "hwloc/shmem.h"
+#endif
+
static bool topo_in_shmem = false;
/*
@@ -154,7 +159,7 @@ int opal_hwloc_base_filter_cpus(hwloc_topology_t topo)
avail = hwloc_bitmap_alloc();
hwloc_bitmap_and(avail, root->online_cpuset, root->allowed_cpuset);
#else
- avail = hwloc_bitmap_dup(root->allowed_cpuset);
+ avail = hwloc_bitmap_dup(root->cpuset);
#endif
OPAL_OUTPUT_VERBOSE((5, opal_hwloc_base_framework.framework_output,
"hwloc:base: no cpus specified - using root available cpuset"));
@@ -178,7 +183,7 @@ int opal_hwloc_base_filter_cpus(hwloc_topology_t topo)
hwloc_bitmap_and(pucpus, pu->online_cpuset, pu->allowed_cpuset);
#else
hwloc_bitmap_free(pucpus);
- pucpus = hwloc_bitmap_dup(pu->allowed_cpuset);
+ pucpus = hwloc_bitmap_dup(pu->cpuset);
#endif
hwloc_bitmap_or(res, avail, pucpus);
hwloc_bitmap_copy(avail, res);
@@ -200,7 +205,7 @@ int opal_hwloc_base_filter_cpus(hwloc_topology_t topo)
hwloc_bitmap_and(pucpus, pu->online_cpuset, pu->allowed_cpuset);
#else
hwloc_bitmap_free(pucpus);
- pucpus = hwloc_bitmap_dup(pu->allowed_cpuset);
+ pucpus = hwloc_bitmap_dup(pu->cpuset);
#endif
hwloc_bitmap_or(res, avail, pucpus);
hwloc_bitmap_copy(avail, res);
diff --git orte/mca/rtc/hwloc/rtc_hwloc.c orte/mca/rtc/hwloc/rtc_hwloc.c
index 8acdf572b577..16d5b825e0c7 100644
--- orte/mca/rtc/hwloc/rtc_hwloc.c
+++ orte/mca/rtc/hwloc/rtc_hwloc.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2014-2017 Intel, Inc. All rights reserved.
- * Copyright (c) 2017 Cisco Systems, Inc. All rights reserved
+ * Copyright (c) 2017-2018 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2017 Inria. All rights reserved.
* $COPYRIGHT$
*
@@ -50,6 +50,11 @@
#include "orte/mca/rtc/base/base.h"
#include "rtc_hwloc.h"
+#if HWLOC_API_VERSION >= 0x20000
+// JMS Is this right?
+#include "hwloc/shmem.h"
+#endif
+
static int init(void);
static void finalize(void);
static void assign(orte_job_t *jdata);
diff --git orte/orted/pmix/pmix_server_register_fns.c orte/orted/pmix/pmix_server_register_fns.c
index 25fb26475918..ff98f6de59cb 100644
--- orte/orted/pmix/pmix_server_register_fns.c
+++ orte/orted/pmix/pmix_server_register_fns.c
@@ -11,7 +11,7 @@
* All rights reserved.
* Copyright (c) 2006-2013 Los Alamos National Security, LLC.
* All rights reserved.
- * Copyright (c) 2009 Cisco Systems, Inc. All rights reserved.
+ * Copyright (c) 2009-2018 Cisco Systems, Inc. All rights reserved
* Copyright (c) 2011 Oak Ridge National Labs. All rights reserved.
* Copyright (c) 2013-2017 Intel, Inc. All rights reserved.
* Copyright (c) 2014 Mellanox Technologies, Inc.
@@ -276,7 +276,11 @@ int orte_pmix_server_register_nspace(orte_job_t *jdata, bool force)
kv = OBJ_NEW(opal_value_t);
kv->key = strdup(OPAL_PMIX_AVAIL_PHYS_MEMORY);
kv->type = OPAL_UINT64;
+#if HWLOC_API_VERSION < 0x20000
kv->data.uint64 = machine->memory.total_memory;
+#else
+ kv->data.uint64 = machine->total_memory;
+#endif
opal_list_append(info, &kv->super);
}
++++++ openmpi3-rpmlintrc ++++++
--- /var/tmp/diff_new_pack.JS8vQa/_old 2022-09-30 17:59:02.225407992 +0200
+++ /var/tmp/diff_new_pack.JS8vQa/_new 2022-09-30 17:59:02.229408001 +0200
@@ -3,4 +3,5 @@
addFilter("openmpi3-libs.* rpath-in-buildconfig")
addFilter("openmpi3-devel.* shared-lib-calls-exit")
+addFilter("libopenmpi.* shlib-policy-name-error")
++++++ pmix-Fix-detection-of-Externally-built-PMIx.patch ++++++
commit ed3ecbc8be313cb3613d6022548deb41750b7725
Author: Artem Polyakov <artpol84(a)gmail.com>
Date: Tue Apr 20 15:16:24 2021 -0700
pmix: Fix detection of Externally-built PMIx
See https://github.com/open-mpi/ompi/issues/8823 for more details.
Signed-off-by: Artem Polyakov <artpol84(a)gmail.com>
diff --git opal/mca/pmix/ext1x/pmix1x_component.c opal/mca/pmix/ext1x/pmix1x_component.c
index 747f95cc6139..57e91305fd85 100644
--- opal/mca/pmix/ext1x/pmix1x_component.c
+++ opal/mca/pmix/ext1x/pmix1x_component.c
@@ -95,8 +95,7 @@ static int external_component_query(mca_base_module_t **module, int *priority)
char *t, *id;
/* see if a PMIx server is present */
- if (NULL != (t = getenv("PMIX_SERVER_URI")) ||
- NULL != (id = getenv("PMIX_ID"))) {
+ if (NULL != (t = getenv("PMIX_NAMESPACE"))) {
/* if PMIx is present, then we are a client and need to use it */
*priority = 100;
} else {
++++++ pmix-pmix2x-Fix-the-PMIx-discovery-logic.patch ++++++
commit 4bb0f4200ae67d203d59f19c7d38c01cc320ee03
Author: Artem Polyakov <artpol84(a)gmail.com>
Date: Tue Apr 20 09:17:32 2021 -0700
pmix/pmix2x: Fix the PMIx discovery logic
See https://github.com/open-mpi/ompi/issues/8823 for the details.
Signed-off-by: Artem Polyakov <artpol84(a)gmail.com>
diff --git opal/mca/pmix/pmix2x/pmix2x_component.c opal/mca/pmix/pmix2x/pmix2x_component.c
index 74a08a77058b..0771331a0679 100644
--- opal/mca/pmix/pmix2x/pmix2x_component.c
+++ opal/mca/pmix/pmix2x/pmix2x_component.c
@@ -118,9 +118,7 @@ static int external_component_query(mca_base_module_t **module, int *priority)
char *t, *id;
/* see if a PMIx server is present */
- if (NULL != (t = getenv("PMIX_SERVER_URI")) ||
- NULL != (t = getenv("PMIX_SERVER_URI2")) ||
- NULL != (id = getenv("PMIX_ID"))) {
+ if (NULL != (t = getenv("PMIX_NAMESPACE"))) {
/* if PMIx is present, then we are a client and need to use it */
*priority = 100;
} else {
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-azure-mgmt-monitor for openSUSE:Factory checked in at 2022-09-30 17:58:37
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-azure-mgmt-monitor (Old)
and /work/SRC/openSUSE:Factory/.python-azure-mgmt-monitor.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-azure-mgmt-monitor"
Fri Sep 30 17:58:37 2022 rev:16 rq:1007195 version:5.0.0
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-azure-mgmt-monitor/python-azure-mgmt-monitor.changes 2022-09-19 16:03:49.238227920 +0200
+++ /work/SRC/openSUSE:Factory/.python-azure-mgmt-monitor.new.2275/python-azure-mgmt-monitor.changes 2022-09-30 17:59:00.653404632 +0200
@@ -1,0 +2,9 @@
+Fri Sep 23 12:40:10 UTC 2022 - John Paul Adrian Glaubitz <adrian.glaubitz(a)suse.com>
+
+- New upstream release
+ + Version 5.0.0
+ + For detailed information about changes see the
+ CHANGELOG.md file provided with this package
+- Update Requires from setup.py
+
+-------------------------------------------------------------------
Old:
----
azure-mgmt-monitor-4.0.1.zip
New:
----
azure-mgmt-monitor-5.0.0.zip
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-azure-mgmt-monitor.spec ++++++
--- /var/tmp/diff_new_pack.oW2MPb/_old 2022-09-30 17:59:01.093405573 +0200
+++ /var/tmp/diff_new_pack.oW2MPb/_new 2022-09-30 17:59:01.097405581 +0200
@@ -21,7 +21,7 @@
%define skip_python2 1
%endif
Name: python-azure-mgmt-monitor
-Version: 4.0.1
+Version: 5.0.0
Release: 0
Summary: Microsoft Azure Monitor Client Library
License: MIT
@@ -38,10 +38,10 @@
Requires: python-azure-common < 2.0.0
Requires: python-azure-common >= 1.1
Requires: python-azure-mgmt-core < 2.0.0
-Requires: python-azure-mgmt-core >= 1.3.1
+Requires: python-azure-mgmt-core >= 1.3.2
Requires: python-azure-mgmt-nspkg >= 3.0.0
Requires: python-azure-nspkg >= 3.0.0
-Requires: python-msrest >= 0.6.21
+Requires: python-msrest >= 0.7.1
Conflicts: python-azure-sdk <= 2.0.0
BuildArch: noarch
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package python-PyInstaller for openSUSE:Factory checked in at 2022-09-30 17:58:36
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/python-PyInstaller (Old)
and /work/SRC/openSUSE:Factory/.python-PyInstaller.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "python-PyInstaller"
Fri Sep 30 17:58:36 2022 rev:7 rq:1007201 version:5.4.1
Changes:
--------
--- /work/SRC/openSUSE:Factory/python-PyInstaller/python-PyInstaller.changes 2021-11-20 02:39:59.544611427 +0100
+++ /work/SRC/openSUSE:Factory/.python-PyInstaller.new.2275/python-PyInstaller.changes 2022-09-30 17:58:59.289401716 +0200
@@ -1,0 +2,24 @@
+Fri Sep 30 08:41:35 UTC 2022 - Markéta Machová <mmachova(a)suse.com>
+
+- Update to 5.4.1
+ * Many changes, see upstream changelog
+ * Issue an error report if a .spec file will not be generated, but
+ command-line options specific to that functionality are given
+ * The `PyInstaller.utils.hooks.get_module_attribute` function now
+ returns the actual attribute value instead of its string representation.
+ * The matplotlib.backends hook no longer collects all available matplotlib
+ backends, but rather tries to auto-detect the used backend(s) by default.
+ * Drop support for Python 3.6.
+  * Make the error handling of `~PyInstaller.utils.hooks.collect_submodules`
+ configurable.
+ * Detect if an icon file (.ico or .icns) is of another image type but has
+ been mislabelled as a native icon type via its file suffix then either
+ normalise to a genuinely native image type if pillow is installed or
+ raise an error.
+ * Implement a mechanism for controlling the collection mode of modules and
+ packages, with granularity ranging from top-level packages to individual
+ sub-modules.
+ * Add support for setuptools-provided distutils, available since
+ setuptools >= 60.0.
+
+-------------------------------------------------------------------
Old:
----
pyinstaller-4.7.tar.gz
New:
----
pyinstaller-5.4.1.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ python-PyInstaller.spec ++++++
--- /var/tmp/diff_new_pack.io8JvD/_old 2022-09-30 17:58:59.741402682 +0200
+++ /var/tmp/diff_new_pack.io8JvD/_new 2022-09-30 17:58:59.745402691 +0200
@@ -1,7 +1,7 @@
#
# spec file for package python-PyInstaller
#
-# Copyright (c) 2021 SUSE LLC
+# Copyright (c) 2022 SUSE LLC
#
# All modifications and additions to the file contributed by third parties
# remain the property of their copyright owners, unless otherwise agreed
@@ -21,7 +21,7 @@
%bcond_without test
%define modname PyInstaller
Name: python-PyInstaller
-Version: 4.7
+Version: 5.4.1
Release: 0
Summary: Bundle a Python application and all its dependencies into a single package
License: GPL-2.0-only
@@ -43,6 +43,7 @@
%if %{with test}
BuildRequires: %{python_module Babel}
BuildRequires: %{python_module Django}
+BuildRequires: %{python_module Pillow}
BuildRequires: %{python_module QtAwesome}
BuildRequires: %{python_module Sphinx}
BuildRequires: %{python_module cryptography}
@@ -52,6 +53,7 @@
BuildRequires: %{python_module pefile >= 2017.8.1}
BuildRequires: %{python_module psutil}
BuildRequires: %{python_module pycountry}
+BuildRequires: %{python_module pyinstaller-hooks-contrib}
BuildRequires: %{python_module pytest-xdist}
BuildRequires: %{python_module pytest}
BuildRequires: %{python_module qt5}
@@ -106,9 +108,9 @@
%if %{with test}
%check
export LANG=en_US.UTF-8
-# test_get_co_using_ctypes, test_get_co_using_ctypes_from_extension, test_replace_paths_in_code broken with python 3.8 on PyInstall 3.6
-# gh#pyinstaller/pyinstaller#4406 skip TestDeeplyNested.testRegr (it is just the only method in the class)
-%pytest_arch -n auto tests/unit -k 'not (test_find_module or test_egg and not test_nspkg1 or test_get_co_using_ctypes or test_get_co_using_ctypes_from_extension or test_replace_paths_in_code or TestDeeplyNested)'
+# https://github.com/pyinstaller/pyinstaller/commit/2df8314ffaedd95ddc9e28712…
+# the test is broken since 5.2
+%pytest_arch -n auto tests/unit -k "not test_normalize_icon"
%endif
%post
++++++ pyinstaller-4.7.tar.gz -> pyinstaller-5.4.1.tar.gz ++++++
++++ 85002 lines of diff (skipped)
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package perl-Image-ExifTool for openSUSE:Factory checked in at 2022-09-30 17:58:35
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/perl-Image-ExifTool (Old)
and /work/SRC/openSUSE:Factory/.perl-Image-ExifTool.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "perl-Image-ExifTool"
Fri Sep 30 17:58:35 2022 rev:45 rq:1007185 version:12.45
Changes:
--------
--- /work/SRC/openSUSE:Factory/perl-Image-ExifTool/perl-Image-ExifTool.changes 2022-07-04 11:33:06.412045531 +0200
+++ /work/SRC/openSUSE:Factory/.perl-Image-ExifTool.new.2275/perl-Image-ExifTool.changes 2022-09-30 17:58:56.581395927 +0200
@@ -1,0 +2,30 @@
+Fri Sep 30 07:26:22 UTC 2022 - Paolo Stivanin <info(a)paolostivanin.com>
+
+- Update to version 12.45:
+ * Added new IPTC Video Metadata version 1.3 tags
+ * Added a couple of new Canon lenses (thanks Norbert Wasser)
+ * Added a new Sony LensType (thanks Jos Roost)
+ * Added known Unknown value for IPTC ChromaticityColorant (thanks Herb)
+ * Added new Nikon WhiteBalanceFineTune tag (thanks Milos Komarcevic)
+ * Extract the raw thermal data from all frames of a SEQ file when -ee2 is used
+ * Decode individual tags in QuickTime ColorRepresentation
+ * Decode a new Matroska tag
+ * Improved verbose "nothing changed" messages when writing
+ * Patched -ee option to extract metadata after the first Cluster in MKV videos
+ (previously only -U and -v did this)
+ * Patched to differentiate Java bytecode .class files from Mach-O fat binaries
+ * Patched to avoid "Use of uninitialized value" warning when deleting GPS
+ coordinates via the newly writable Composite tags
+ * Patched to avoid duplicating raw data when writing Sony ARW images where the
+ raw data is double-referenced as both strips and tiles (affects ARW images
+ from some newer models like the ILCE-1 when SonyRawFileType is "Lossless
+ Compressed RAW 2")
+ * Patched to avoid "fixing" the order of IFD entries in TIFF-based RAW files
+ to improve compatibility with some RAW viewers
+ * Minor change to Composite FileNumber to remove "-" when -n is used
+ * Fixed problem extracting some timed metadata when "-api ignoretags=all" was
+ used with "-api requesttags" to request the specific information
+ * Fixed -validate feature to avoid incorrectly warning about non-capitalized
+ boolean values in XMP
+
+-------------------------------------------------------------------
Old:
----
Image-ExifTool-12.42.tar.gz
New:
----
Image-ExifTool-12.45.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ perl-Image-ExifTool.spec ++++++
--- /var/tmp/diff_new_pack.0q6Mlo/_old 2022-09-30 17:58:57.129397099 +0200
+++ /var/tmp/diff_new_pack.0q6Mlo/_new 2022-09-30 17:58:57.133397108 +0200
@@ -19,7 +19,7 @@
%define cpan_name Image-ExifTool
Name: perl-Image-ExifTool
-Version: 12.42
+Version: 12.45
Release: 0
License: Artistic-1.0 OR GPL-1.0-or-later
Summary: Read and write meta information
++++++ Image-ExifTool-12.42.tar.gz -> Image-ExifTool-12.45.tar.gz ++++++
++++ 13896 lines of diff (skipped)
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package helmfile for openSUSE:Factory checked in at 2022-09-30 17:58:33
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/helmfile (Old)
and /work/SRC/openSUSE:Factory/.helmfile.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "helmfile"
Fri Sep 30 17:58:33 2022 rev:33 rq:1007197 version:0.146.0
Changes:
--------
--- /work/SRC/openSUSE:Factory/helmfile/helmfile.changes 2022-09-15 23:00:03.465362324 +0200
+++ /work/SRC/openSUSE:Factory/.helmfile.new.2275/helmfile.changes 2022-09-30 17:58:55.141392849 +0200
@@ -1,0 +2,32 @@
+Wed Sep 28 09:20:45 UTC 2022 - manfred.h(a)gmx.net
+
+- Update to version 0.146.0:
+ This minor release incorporates many new features and a few bug
+ fixes, almost all thanks to 5 new contributors! Kudos to everyone
+ who contributed code 🎉
+ Also, we've been fixing a handful of regressions introduced via
+ v0.145.0 in v0.145.x patch releases. For this release, the only
+ regression we found and fixed was #370. Hopefully, this will be
+ the most reliable release ever since v0.145.0!
+
+ What's Changed:
+ * Support helm-secrets v4.0.0 by @KqLLL in #360
+ * use latest helm in github ci by @yxxhero in #367
+ * feat: show live output from the Helm binary by @rodrigorfk
+ in #286
+ * remove flags.Parse in cobra by @yxxhero in #369
+ * fix(oci): clean dead code by @jycamier in #290
+ * added option for --no-hooks for helm diff and apply by
+ @arpanadhikari in #279
+ * chore: Tidy up a local var scopinng for maintainability by
+ @mumoshu in #372
+ * move interactive option to global by @yxxhero in #370
+ * fix: add missing untar flag to pull chart from oci registry
+ by @toVersus in #371
+ * update CLI Ref by @yxxhero in #373
+ * Add preapply hook by @Sajfer in #79
+ * Fix not to ignore diff selector when it matched nothing by
+ @mumoshu in #374
+ * Bump chartify to v0.11.0 by @mumoshu in #375
+
+-------------------------------------------------------------------
Old:
----
helmfile-0.145.5.tar.gz
New:
----
helmfile-0.146.0.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ helmfile.spec ++++++
--- /var/tmp/diff_new_pack.GQr7EL/_old 2022-09-30 17:58:55.893394457 +0200
+++ /var/tmp/diff_new_pack.GQr7EL/_new 2022-09-30 17:58:55.901394474 +0200
@@ -17,9 +17,9 @@
#
-%define git_commit 3ff98c02a1ff92c7d1d3118ee064f73e1a95fa36
+%define git_commit 3c151b8d458890da5e2c7b6422fe3c754899bdf0
Name: helmfile
-Version: 0.145.5
+Version: 0.146.0
Release: 0
Summary: Deploy Kubernetes Helm Charts
License: MIT
++++++ _service ++++++
--- /var/tmp/diff_new_pack.GQr7EL/_old 2022-09-30 17:58:55.937394551 +0200
+++ /var/tmp/diff_new_pack.GQr7EL/_new 2022-09-30 17:58:55.941394559 +0200
@@ -5,7 +5,7 @@
<param name="exclude">.git</param>
<param name="versionformat">@PARENT_TAG@</param>
<param name="versionrewrite-pattern">v(.*)</param>
- <param name="revision">v0.145.5</param>
+ <param name="revision">v0.146.0</param>
<param name="changesgenerate">enable</param>
</service>
<service name="recompress" mode="disabled">
++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.GQr7EL/_old 2022-09-30 17:58:55.965394610 +0200
+++ /var/tmp/diff_new_pack.GQr7EL/_new 2022-09-30 17:58:55.969394619 +0200
@@ -1,5 +1,5 @@
<servicedata>
<service name="tar_scm">
<param name="url">https://github.com/helmfile/helmfile.git</param>
- <param name="changesrevision">3ff98c02a1ff92c7d1d3118ee064f73e1a95fa36</param></service></servicedata>
+ <param name="changesrevision">3c151b8d458890da5e2c7b6422fe3c754899bdf0</param></service></servicedata>
++++++ helmfile-0.145.5.tar.gz -> helmfile-0.146.0.tar.gz ++++++
++++ 3334 lines of diff (skipped)
++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/helmfile/vendor.tar.gz /work/SRC/openSUSE:Factory/.helmfile.new.2275/vendor.tar.gz differ: char 5, line 1
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package go-sendxmpp for openSUSE:Factory checked in at 2022-09-30 17:58:32
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/go-sendxmpp (Old)
and /work/SRC/openSUSE:Factory/.go-sendxmpp.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "go-sendxmpp"
Fri Sep 30 17:58:32 2022 rev:3 rq:1007173 version:0.5.4
Changes:
--------
--- /work/SRC/openSUSE:Factory/go-sendxmpp/go-sendxmpp.changes 2022-09-29 18:15:24.791496686 +0200
+++ /work/SRC/openSUSE:Factory/.go-sendxmpp.new.2275/go-sendxmpp.changes 2022-09-30 17:58:54.121390669 +0200
@@ -2 +2 @@
-Thu Sep 29 14:27:43 UTC 2022 - mvetter(a)suse.com
+Fri Sep 30 07:20:09 UTC 2022 - mvetter(a)suse.com
@@ -4,60 +4,2 @@
-- Update to version 0.5.3:
- * Prepare release v0.5.3
- * Don't check for empty message in listen mode.
- * Start new development cycle.
- * Release v0.5.2
- * Update go modules.
- * Don't connect to the XMPP server if the message is empty
- * Update go modules.
- * Remove another "little".
- * Remove the "little".
- * Fix typo.
- * Improve error handling in XML parsing.
- * Update go modules.
- * Update go modules.
- * Update go modules.
- * Update go modules.
- * Update go modules.
- * Update go modules.
- * Update go modules.
- * Use resource as alias if specified and no alias is specified.
- * Update go modules.
- * Deprecate resource setting and introduce alias setting.
- * Update go modules.
- * Detect CNAME loops.
- * Update go modules.
- * Perform CNAME look ups.
- * Update go modules.
- * Fix typo.
- * Reply to IQs.
- * CI: Strip binaries.
- * Update go modules.
- * Update go modules.
- * Remove local replace in go.mod.
- * Update manpage.
- * Add new lines to stanzas created by go-sendxmpp itself.
- * Update go modules.
- * CI: Only build binaries for releases.
- * CI: Disable PIE for 32bit builds.
- * CI: Try 'static-pie' for 32bit builds.
- * CI: Enable all builds except linux/386.
- * CI: Disable all builds except amd64 for testing.
- * Hopefully fix CI.
- * Revert "Add hardening flags to CI builds."
- * Revert "Remove `-race` from CI build."
- * Remove `-race` from CI build.
- * Add hardening flags to CI builds.
- * Update go modules.
- * Words…
- * Update go modules.
- * Update go modules.
- * Fix tense.
- * Added more checks for empty messages.
- * Update go modules.
- * Ox: Don't encrypt empty messages.
- * Ox: Improve error messages for failed key requests.
- * Update modules.
- * Update modules.
- * Update go modules.
- * Update go modules.
- * Start new development cycle.
+- Update to version 0.5.4:
+ * Fix http-upload.
Old:
----
go-sendxmpp-0.5.3.tar.gz
New:
----
go-sendxmpp-0.5.4.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ go-sendxmpp.spec ++++++
--- /var/tmp/diff_new_pack.gdTrPe/_old 2022-09-30 17:58:54.633391763 +0200
+++ /var/tmp/diff_new_pack.gdTrPe/_new 2022-09-30 17:58:54.637391772 +0200
@@ -17,7 +17,7 @@
Name: go-sendxmpp
-Version: 0.5.3
+Version: 0.5.4
Release: 0
Summary: A little tool to send messages to an XMPP contact or MUC
License: BSD-2-Clause
++++++ _service ++++++
--- /var/tmp/diff_new_pack.gdTrPe/_old 2022-09-30 17:58:54.677391857 +0200
+++ /var/tmp/diff_new_pack.gdTrPe/_new 2022-09-30 17:58:54.681391866 +0200
@@ -3,9 +3,9 @@
<param name="url">https://salsa.debian.org/mdosch/go-sendxmpp.git</param>
<param name="scm">git</param>
<param name="exclude">.git</param>
- <param name="revision">v0.5.3</param>
+ <param name="revision">v0.5.4</param>
<param name="versionformat">@PARENT_TAG@</param>
- <param name="changesgenerate">enable</param>
+ <param name="changesgenerate">disable</param>
<param name="versionrewrite-pattern">v(.*)</param>
</service>
<service name="set_version" mode="disabled">
++++++ go-sendxmpp-0.5.3.tar.gz -> go-sendxmpp-0.5.4.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/CHANGELOG.md new/go-sendxmpp-0.5.4/CHANGELOG.md
--- old/go-sendxmpp-0.5.3/CHANGELOG.md 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/CHANGELOG.md 2022-09-30 07:26:03.000000000 +0200
@@ -1,5 +1,9 @@
# Changelog
+## [v0.5.4] 2022-09-30
+### Changed
+- Fix http-upload.
+
## [v0.5.3] 2022-09-29
### Changed
- Don't check for empty message when `--listen` is used.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/const.go new/go-sendxmpp-0.5.4/const.go
--- old/go-sendxmpp-0.5.3/const.go 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/const.go 2022-09-30 07:26:03.000000000 +0200
@@ -5,7 +5,7 @@
package main
const (
- version = "0.5.3"
+ version = "0.5.4"
nsDiscoInfo = "http://jabber.org/protocol/disco#info"
nsDiscoItems = "http://jabber.org/protocol/disco#items"
nsEme = "urn:xmpp:eme:0"
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/main.go new/go-sendxmpp-0.5.4/main.go
--- old/go-sendxmpp-0.5.3/main.go 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/main.go 2022-09-30 07:26:03.000000000 +0200
@@ -269,7 +269,7 @@
// Skip reading message if '-i' or '--interactive' is set to work with e.g. 'tail -f'.
// Also for listening mode.
- if !*flagInteractive && !*flagListen {
+ if !*flagInteractive && !*flagListen && *flagHTTPUpload == "" {
if message == "" {
scanner := bufio.NewScanner(os.Stdin)
@@ -296,7 +296,7 @@
message = reg.ReplaceAllString(message, "")
// Exit if message is empty.
- if message == "" && !*flagInteractive && !*flagListen {
+ if message == "" && !*flagInteractive && !*flagListen && *flagHTTPUpload == "" {
os.Exit(0)
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/man/go-sendxmpp.1 new/go-sendxmpp-0.5.4/man/go-sendxmpp.1
--- old/go-sendxmpp-0.5.3/man/go-sendxmpp.1 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/man/go-sendxmpp.1 2022-09-30 07:26:03.000000000 +0200
@@ -10,7 +10,7 @@
.br
You can either pipe a programs output to \fBgo\-sendxmpp\fR, write in your terminal (put \fB^D\fR in a new line to finish) or send the input from a file (\fB\-m\fR or \fB\-\-message\fR)\. The account data is expected at \fB~/\.config/go\-sendxmpp/config\fR (preferred), \fB~/\.config/go\-sendxmpp/sendxmpprc\fR (deprecated) \fB~/\.sendxmpprc\fR (for compatibility with the original perl sendxmpp) if no other configuration file location is specified with \fB\-f\fR or \fB\-\-file\fR\.
.SH "OPTIONS"
-\fB\-a\fR, \fB\-\-alias\fR=[\fIvalue\fR]: Set alias/nicknamefor chatrooms\.
+\fB\-a\fR, \fB\-\-alias\fR=[\fIvalue\fR]: Set alias/nickname for chatrooms\.
.P
\fB\-c\fR, \fB\-\-chatroom\fR=[\fIvalue\fR]: Send message to a chatroom\.
.P
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/man/go-sendxmpp.1.html new/go-sendxmpp-0.5.4/man/go-sendxmpp.1.html
--- old/go-sendxmpp-0.5.3/man/go-sendxmpp.1.html 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/man/go-sendxmpp.1.html 2022-09-30 07:26:03.000000000 +0200
@@ -92,7 +92,7 @@
<h2 id="OPTIONS">OPTIONS</h2>
<p><code>-a</code>, <code>--alias</code>=[<var>value</var>]:
- Set alias/nicknamefor chatrooms.</p>
+ Set alias/nickname for chatrooms.</p>
<p><code>-c</code>, <code>--chatroom</code>=[<var>value</var>]:
Send message to a chatroom.</p>
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/go-sendxmpp-0.5.3/man/go-sendxmpp.1.ronn new/go-sendxmpp-0.5.4/man/go-sendxmpp.1.ronn
--- old/go-sendxmpp-0.5.3/man/go-sendxmpp.1.ronn 2022-09-29 15:37:40.000000000 +0200
+++ new/go-sendxmpp-0.5.4/man/go-sendxmpp.1.ronn 2022-09-30 07:26:03.000000000 +0200
@@ -17,7 +17,7 @@
## OPTIONS
`-a`, `--alias`=[<value>]:
- Set alias/nicknamefor chatrooms.
+ Set alias/nickname for chatrooms.
`-c`, `--chatroom`=[<value>]:
Send message to a chatroom.
++++++ vendor.tar.gz ++++++
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package terraform for openSUSE:Factory checked in at 2022-09-30 17:58:28
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/terraform (Old)
and /work/SRC/openSUSE:Factory/.terraform.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "terraform"
Fri Sep 30 17:58:28 2022 rev:41 rq:1007168 version:1.3.1
Changes:
--------
--- /work/SRC/openSUSE:Factory/terraform/terraform.changes 2022-09-27 20:14:35.817914707 +0200
+++ /work/SRC/openSUSE:Factory/.terraform.new.2275/terraform.changes 2022-09-30 17:58:50.229382349 +0200
@@ -1,0 +2,11 @@
+Fri Sep 30 05:13:08 UTC 2022 - Johannes Kastl <kastl(a)b1-systems.de>
+
+- update to 1.3.1:
+ * BUG FIXES:
+ - Fixed a crash when using objects with optional attributes and default values in collections, most visible with nested modules. (#31847)
+ - Prevent cycles in some situations where a provider depends on resources in the configuration which are participating in planned changes. (#31857)
+ - Fixed an error when attempting to destroy a configuration where resources do not exist in the state. (#31858)
+ - Data sources which cannot be read during import will no longer prevent the state from being serialized. (#31871)
+ - Fixed a crash which occurred when a resource with a precondition and/or a postcondition appeared inside a module with two or more instances. (#31860)
+
+-------------------------------------------------------------------
Old:
----
terraform-1.3.0.obscpio
terraform-1.3.0.tar.gz
New:
----
terraform-1.3.1.obscpio
terraform-1.3.1.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ terraform.spec ++++++
--- /var/tmp/diff_new_pack.6a1BDT/_old 2022-09-30 17:58:51.349384743 +0200
+++ /var/tmp/diff_new_pack.6a1BDT/_new 2022-09-30 17:58:51.357384760 +0200
@@ -17,7 +17,7 @@
Name: terraform
-Version: 1.3.0
+Version: 1.3.1
Release: 0
Summary: Tool for building infrastructure safely and efficiently
License: MPL-2.0
++++++ _service ++++++
--- /var/tmp/diff_new_pack.6a1BDT/_old 2022-09-30 17:58:51.397384846 +0200
+++ /var/tmp/diff_new_pack.6a1BDT/_new 2022-09-30 17:58:51.401384854 +0200
@@ -3,8 +3,8 @@
<param name="url">https://github.com/hashicorp/terraform</param>
<param name="scm">git</param>
<param name="filename">terraform</param>
- <param name="versionformat">1.3.0</param>
- <param name="revision">v1.3.0</param>
+ <param name="versionformat">1.3.1</param>
+ <param name="revision">v1.3.1</param>
<param name="exclude">.git</param>
</service>
<service name="tar" mode="disabled"/>
@@ -16,7 +16,7 @@
<param name="basename">terraform</param>
</service>
<service name="go_modules" mode="disabled">
- <param name="archive">terraform-1.3.0.tar.gz</param>
+ <param name="archive">terraform-1.3.1.tar.gz</param>
</service>
</services>
++++++ terraform-1.3.0.obscpio -> terraform-1.3.1.obscpio ++++++
/work/SRC/openSUSE:Factory/terraform/terraform-1.3.0.obscpio /work/SRC/openSUSE:Factory/.terraform.new.2275/terraform-1.3.1.obscpio differ: char 49, line 1
++++++ terraform-1.3.0.tar.gz -> terraform-1.3.1.tar.gz ++++++
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/CHANGELOG.md new/terraform-1.3.1/CHANGELOG.md
--- old/terraform-1.3.0/CHANGELOG.md 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/CHANGELOG.md 2022-09-28 15:46:33.000000000 +0200
@@ -1,3 +1,16 @@
+## 1.3.1 (September 28, 2022)
+
+NOTE:
+* On `darwin/amd64` and `darwin/arm64` architectures, `terraform` binaries are now built with CGO enabled. This should not have any user-facing impact, except in cases where the pure Go DNS resolver causes problems on recent versions of macOS: using CGO may mitigate these issues. Please see the upstream bug https://github.com/golang/go/issues/52839 for more details.
+
+BUG FIXES:
+
+* Fixed a crash when using objects with optional attributes and default values in collections, most visible with nested modules. ([#31847](https://github.com/hashicorp/terraform/issues/31847))
+* Prevent cycles in some situations where a provider depends on resources in the configuration which are participating in planned changes. ([#31857](https://github.com/hashicorp/terraform/issues/31857))
+* Fixed an error when attempting to destroy a configuration where resources do not exist in the state. ([#31858](https://github.com/hashicorp/terraform/issues/31858))
+* Data sources which cannot be read during import will no longer prevent the state from being serialized. ([#31871](https://github.com/hashicorp/terraform/issues/31871))
+* Fixed a crash which occurred when a resource with a precondition and/or a postcondition appeared inside a module with two or more instances. ([#31860](https://github.com/hashicorp/terraform/issues/31860))
+
## 1.3.0 (September 21, 2022)
NEW FEATURES:
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/go.mod new/terraform-1.3.1/go.mod
--- old/terraform-1.3.0/go.mod 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/go.mod 2022-09-28 15:46:33.000000000 +0200
@@ -42,7 +42,7 @@
github.com/hashicorp/go-uuid v1.0.3
github.com/hashicorp/go-version v1.6.0
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f
- github.com/hashicorp/hcl/v2 v2.14.0
+ github.com/hashicorp/hcl/v2 v2.14.1
github.com/hashicorp/terraform-config-inspect v0.0.0-20210209133302-4fd17a0faac2
github.com/hashicorp/terraform-registry-address v0.0.0-20220623143253-7d51757b572c
github.com/hashicorp/terraform-svchost v0.0.0-20200729002733-f050f53b9734
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/go.sum new/terraform-1.3.1/go.sum
--- old/terraform-1.3.0/go.sum 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/go.sum 2022-09-28 15:46:33.000000000 +0200
@@ -387,8 +387,8 @@
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f h1:UdxlrJz4JOnY8W+DbLISwf2B8WXEolNRA8BGCwI9jws=
github.com/hashicorp/hcl v0.0.0-20170504190234-a4b07c25de5f/go.mod h1:oZtUIOe8dh44I2q6ScRibXws4Ajl+d+nod3AaR9vL5w=
github.com/hashicorp/hcl/v2 v2.0.0/go.mod h1:oVVDG71tEinNGYCxinCYadcmKU9bglqW9pV3txagJ90=
-github.com/hashicorp/hcl/v2 v2.14.0 h1:jX6+Q38Ly9zaAJlAjnFVyeNSNCKKW8D0wvyg7vij5Wc=
-github.com/hashicorp/hcl/v2 v2.14.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
+github.com/hashicorp/hcl/v2 v2.14.1 h1:x0BpjfZ+CYdbiz+8yZTQ+gdLO7IXvOut7Da+XJayx34=
+github.com/hashicorp/hcl/v2 v2.14.1/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0=
github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d h1:9ARUJJ1VVynB176G1HCwleORqCaXm/Vx0uUi0dL26I0=
github.com/hashicorp/jsonapi v0.0.0-20210826224640-ee7dae0fb22d/go.mod h1:Yog5+CPEM3c99L1CL2CFCYoSzgWm5vTU58idbRUaLik=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/dag/graph.go new/terraform-1.3.1/internal/dag/graph.go
--- old/terraform-1.3.0/internal/dag/graph.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/dag/graph.go 2022-09-28 15:46:33.000000000 +0200
@@ -230,6 +230,28 @@
s.Add(source)
}
+// Subsume imports all of the nodes and edges from the given graph into the
+// reciever, leaving the given graph unchanged.
+//
+// If any of the nodes in the given graph are already present in the reciever
+// then the existing node will be retained and any new edges from the given
+// graph will be connected with it.
+//
+// If the given graph has edges in common with the reciever then they will be
+// ignored, because each pair of nodes can only be connected once.
+func (g *Graph) Subsume(other *Graph) {
+ // We're using Set.Filter just as a "visit each element" here, so we're
+ // not doing anything with the result (which will always be empty).
+ other.vertices.Filter(func(i interface{}) bool {
+ g.Add(i)
+ return false
+ })
+ other.edges.Filter(func(i interface{}) bool {
+ g.Connect(i.(Edge))
+ return false
+ })
+}
+
// String outputs some human-friendly output for the graph structure.
func (g *Graph) StringWithNodeTypes() string {
var buf bytes.Buffer
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_apply2_test.go new/terraform-1.3.1/internal/terraform/context_apply2_test.go
--- old/terraform-1.3.0/internal/terraform/context_apply2_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_apply2_test.go 2022-09-28 15:46:33.000000000 +0200
@@ -1201,6 +1201,25 @@
state, diags := ctx.Apply(plan, m)
assertNoErrors(t, diags)
+ // Resource changes which have dependencies across providers which
+ // themselves depend on resources can result in cycles.
+ // Because other_object transitively depends on the module resources
+ // through its provider, we trigger changes on both sides of this boundary
+ // to ensure we can create a valid plan.
+ //
+ // Taint the object to make sure a replacement works in the plan.
+ otherObjAddr := mustResourceInstanceAddr("other_object.other")
+ otherObj := state.ResourceInstance(otherObjAddr)
+ otherObj.Current.Status = states.ObjectTainted
+ // Force a change which needs to be reverted.
+ testObjAddr := mustResourceInstanceAddr(`module.mod["a"].test_object.a`)
+ testObjA := state.ResourceInstance(testObjAddr)
+ testObjA.Current.AttrsJSON = []byte(`{"test_bool":null,"test_list":null,"test_map":null,"test_number":null,"test_string":"changed"}`)
+
+ _, diags = ctx.Plan(m, state, opts)
+ assertNoErrors(t, diags)
+ return
+
otherProvider.ConfigureProviderCalled = false
otherProvider.ConfigureProviderFn = func(req providers.ConfigureProviderRequest) (resp providers.ConfigureProviderResponse) {
// check that our config is complete, even during a destroy plan
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_apply_test.go new/terraform-1.3.1/internal/terraform/context_apply_test.go
--- old/terraform-1.3.0/internal/terraform/context_apply_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_apply_test.go 2022-09-28 15:46:33.000000000 +0200
@@ -12165,6 +12165,70 @@
}
}
+func TestContext2Apply_moduleVariableOptionalAttributesDefaultChild(t *testing.T) {
+ m := testModuleInline(t, map[string]string{
+ "main.tf": `
+variable "in" {
+ type = list(object({
+ a = optional(set(string))
+ }))
+ default = [
+ { a = [ "foo" ] },
+ { },
+ ]
+}
+
+module "child" {
+ source = "./child"
+ in = var.in
+}
+
+output "out" {
+ value = module.child.out
+}
+`,
+ "child/main.tf": `
+variable "in" {
+ type = list(object({
+ a = optional(set(string), [])
+ }))
+ default = []
+}
+
+output "out" {
+ value = var.in
+}
+`,
+ })
+
+ ctx := testContext2(t, &ContextOpts{})
+
+ // We don't specify a value for the variable here, relying on its defined
+ // default.
+ plan, diags := ctx.Plan(m, states.NewState(), SimplePlanOpts(plans.NormalMode, testInputValuesUnset(m.Module.Variables)))
+ if diags.HasErrors() {
+ t.Fatal(diags.ErrWithWarnings())
+ }
+
+ state, diags := ctx.Apply(plan, m)
+ if diags.HasErrors() {
+ t.Fatal(diags.ErrWithWarnings())
+ }
+
+ got := state.RootModule().OutputValues["out"].Value
+ want := cty.ListVal([]cty.Value{
+ cty.ObjectVal(map[string]cty.Value{
+ "a": cty.SetVal([]cty.Value{cty.StringVal("foo")}),
+ }),
+ cty.ObjectVal(map[string]cty.Value{
+ "a": cty.SetValEmpty(cty.String),
+ }),
+ })
+ if !want.RawEquals(got) {
+ t.Fatalf("wrong result\ngot: %#v\nwant: %#v", got, want)
+ }
+}
+
func TestContext2Apply_provisionerSensitive(t *testing.T) {
m := testModule(t, "apply-provisioner-sensitive")
p := testProvider("aws")
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_import.go new/terraform-1.3.1/internal/terraform/context_import.go
--- old/terraform-1.3.0/internal/terraform/context_import.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_import.go 2022-09-28 15:46:33.000000000 +0200
@@ -82,6 +82,11 @@
return state, diags
}
+ // Data sources which could not be read during the import plan will be
+ // unknown. We need to strip those objects out so that the state can be
+ // serialized.
+ walker.State.RemovePlannedResourceInstanceObjects()
+
newState := walker.State.Close()
return newState, diags
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_import_test.go new/terraform-1.3.1/internal/terraform/context_import_test.go
--- old/terraform-1.3.0/internal/terraform/context_import_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_import_test.go 2022-09-28 15:46:33.000000000 +0200
@@ -430,7 +430,24 @@
func TestContextImport_refresh(t *testing.T) {
p := testProvider("aws")
- m := testModule(t, "import-provider")
+ m := testModuleInline(t, map[string]string{
+ "main.tf": `
+provider "aws" {
+ foo = "bar"
+}
+
+resource "aws_instance" "foo" {
+}
+
+
+// we are only importing aws_instance.foo, so these resources will be unknown
+resource "aws_instance" "bar" {
+}
+data "aws_data_source" "bar" {
+ foo = aws_instance.bar.id
+}
+`})
+
ctx := testContext2(t, &ContextOpts{
Providers: map[addrs.Provider]providers.Factory{
addrs.NewDefaultProvider("aws"): testProviderFuncFixed(p),
@@ -448,6 +465,13 @@
},
}
+ p.ReadDataSourceResponse = &providers.ReadDataSourceResponse{
+ State: cty.ObjectVal(map[string]cty.Value{
+ "id": cty.StringVal("id"),
+ "foo": cty.UnknownVal(cty.String),
+ }),
+ }
+
p.ReadResourceFn = nil
p.ReadResourceResponse = &providers.ReadResourceResponse{
@@ -471,6 +495,10 @@
t.Fatalf("unexpected errors: %s", diags.Err())
}
+ if d := state.ResourceInstance(mustResourceInstanceAddr("data.aws_data_source.bar")); d != nil {
+ t.Errorf("data.aws_data_source.bar has a status of ObjectPlanned and should not be in the state\ngot:%#v\n", d.Current)
+ }
+
actual := strings.TrimSpace(state.String())
expected := strings.TrimSpace(testImportRefreshStr)
if actual != expected {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_plan2_test.go new/terraform-1.3.1/internal/terraform/context_plan2_test.go
--- old/terraform-1.3.0/internal/terraform/context_plan2_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_plan2_test.go 2022-09-28 15:46:33.000000000 +0200
@@ -401,6 +401,129 @@
}
}
+func TestContext2Plan_resourceChecksInExpandedModule(t *testing.T) {
+ // When a resource is in a nested module we have two levels of expansion
+ // to do: first expand the module the resource is declared in, and then
+ // expand the resource itself.
+ //
+ // In earlier versions of Terraform we did that expansion as two levels
+ // of DynamicExpand, which led to a bug where we didn't have any central
+ // location from which to register all of the instances of a checkable
+ // resource.
+ //
+ // We now handle the full expansion all in one graph node and one dynamic
+ // subgraph, which avoids the problem. This is a regression test for the
+ // earlier bug. If this test is panicking with "duplicate checkable objects
+ // report" then that suggests the bug is reintroduced and we're now back
+ // to reporting each module instance separately again, which is incorrect.
+
+ p := testProvider("test")
+ p.GetProviderSchemaResponse = &providers.GetProviderSchemaResponse{
+ Provider: providers.Schema{
+ Block: &configschema.Block{},
+ },
+ ResourceTypes: map[string]providers.Schema{
+ "test": {
+ Block: &configschema.Block{},
+ },
+ },
+ }
+ p.ReadResourceFn = func(req providers.ReadResourceRequest) (resp providers.ReadResourceResponse) {
+ resp.NewState = req.PriorState
+ return resp
+ }
+ p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+ resp.PlannedState = cty.EmptyObjectVal
+ return resp
+ }
+ p.ApplyResourceChangeFn = func(req providers.ApplyResourceChangeRequest) (resp providers.ApplyResourceChangeResponse) {
+ resp.NewState = req.PlannedState
+ return resp
+ }
+
+ m := testModuleInline(t, map[string]string{
+ "main.tf": `
+ module "child" {
+ source = "./child"
+ count = 2 # must be at least 2 for this test to be valid
+ }
+ `,
+ "child/child.tf": `
+ locals {
+ a = "a"
+ }
+
+ resource "test" "test1" {
+ lifecycle {
+ postcondition {
+ # It doesn't matter what this checks as long as it
+ # passes, because if we don't handle expansion properly
+ # then we'll crash before we even get to evaluating this.
+ condition = local.a == local.a
+ error_message = "Postcondition failed."
+ }
+ }
+ }
+
+ resource "test" "test2" {
+ count = 2
+
+ lifecycle {
+ postcondition {
+ # It doesn't matter what this checks as long as it
+ # passes, because if we don't handle expansion properly
+ # then we'll crash before we even get to evaluating this.
+ condition = local.a == local.a
+ error_message = "Postcondition failed."
+ }
+ }
+ }
+ `,
+ })
+
+ ctx := testContext2(t, &ContextOpts{
+ Providers: map[addrs.Provider]providers.Factory{
+ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+ },
+ })
+
+ priorState := states.NewState()
+ plan, diags := ctx.Plan(m, priorState, DefaultPlanOpts)
+ assertNoErrors(t, diags)
+
+ resourceInsts := []addrs.AbsResourceInstance{
+ mustResourceInstanceAddr("module.child[0].test.test1"),
+ mustResourceInstanceAddr("module.child[0].test.test2[0]"),
+ mustResourceInstanceAddr("module.child[0].test.test2[1]"),
+ mustResourceInstanceAddr("module.child[1].test.test1"),
+ mustResourceInstanceAddr("module.child[1].test.test2[0]"),
+ mustResourceInstanceAddr("module.child[1].test.test2[1]"),
+ }
+
+ for _, instAddr := range resourceInsts {
+ t.Run(fmt.Sprintf("results for %s", instAddr), func(t *testing.T) {
+ if rc := plan.Changes.ResourceInstance(instAddr); rc != nil {
+ if got, want := rc.Action, plans.Create; got != want {
+ t.Errorf("wrong action for %s\ngot: %s\nwant: %s", instAddr, got, want)
+ }
+ if got, want := rc.ActionReason, plans.ResourceInstanceChangeNoReason; got != want {
+ t.Errorf("wrong action reason for %s\ngot: %s\nwant: %s", instAddr, got, want)
+ }
+ } else {
+ t.Errorf("no planned change for %s", instAddr)
+ }
+
+ if checkResult := plan.Checks.GetObjectResult(instAddr); checkResult != nil {
+ if got, want := checkResult.Status, checks.StatusPass; got != want {
+ t.Errorf("wrong check status for %s\ngot: %s\nwant: %s", instAddr, got, want)
+ }
+ } else {
+ t.Errorf("no check result for %s", instAddr)
+ }
+ })
+ }
+}
+
func TestContext2Plan_dataResourceChecksManagedResourceChange(t *testing.T) {
// This tests the situation where the remote system contains data that
// isn't valid per a data resource postcondition, but that the
@@ -3518,3 +3641,44 @@
t.Fatalf("no cycle error found:\n got: %s\n", msg)
}
}
+
+// plan a destroy with no state where configuration could fail to evaluate
+// expansion indexes.
+func TestContext2Plan_emptyDestroy(t *testing.T) {
+ m := testModuleInline(t, map[string]string{
+ "main.tf": `
+locals {
+ enable = true
+ value = local.enable ? module.example[0].out : null
+}
+
+module "example" {
+ count = local.enable ? 1 : 0
+ source = "./example"
+}
+`,
+ "example/main.tf": `
+resource "test_resource" "x" {
+}
+
+output "out" {
+ value = test_resource.x
+}
+`,
+ })
+
+ p := testProvider("test")
+ state := states.NewState()
+
+ ctx := testContext2(t, &ContextOpts{
+ Providers: map[addrs.Provider]providers.Factory{
+ addrs.NewDefaultProvider("test"): testProviderFuncFixed(p),
+ },
+ })
+
+ _, diags := ctx.Plan(m, state, &PlanOpts{
+ Mode: plans.DestroyMode,
+ })
+
+ assertNoErrors(t, diags)
+}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/context_plan_test.go new/terraform-1.3.1/internal/terraform/context_plan_test.go
--- old/terraform-1.3.0/internal/terraform/context_plan_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/context_plan_test.go 2022-09-28 15:46:33.000000000 +0200
@@ -4536,7 +4536,20 @@
func TestContext2Plan_ignoreChangesWildcard(t *testing.T) {
m := testModule(t, "plan-ignore-changes-wildcard")
p := testProvider("aws")
- p.PlanResourceChangeFn = testDiffFn
+ p.PlanResourceChangeFn = func(req providers.PlanResourceChangeRequest) (resp providers.PlanResourceChangeResponse) {
+ // computed attributes should not be set in config
+ id := req.Config.GetAttr("id")
+ if !id.IsNull() {
+ t.Error("computed id set in plan config")
+ }
+
+ foo := req.Config.GetAttr("foo")
+ if foo.IsNull() {
+ t.Error(`missing "foo" during plan, was set to "bar" in state and config`)
+ }
+
+ return testDiffFn(req)
+ }
state := states.NewState()
root := state.EnsureModule(addrs.RootModuleInstance)
@@ -4544,7 +4557,7 @@
mustResourceInstanceAddr("aws_instance.foo").Resource,
&states.ResourceInstanceObjectSrc{
Status: states.ObjectReady,
- AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","instance":"t2.micro","type":"aws_instance"}`),
+ AttrsJSON: []byte(`{"id":"bar","ami":"ami-abcd1234","instance":"t2.micro","type":"aws_instance","foo":"bar"}`),
},
mustProviderConfig(`provider["registry.terraform.io/hashicorp/aws"]`),
)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/graph.go new/terraform-1.3.1/internal/terraform/graph.go
--- old/terraform-1.3.0/internal/terraform/graph.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/graph.go 2022-09-28 15:46:33.000000000 +0200
@@ -82,8 +82,9 @@
log.Printf("[TRACE] vertex %q: expanding dynamic subgraph", dag.VertexName(v))
g, err := ev.DynamicExpand(vertexCtx)
- if err != nil {
- diags = diags.Append(err)
+ diags = diags.Append(err)
+ if diags.HasErrors() {
+ log.Printf("[TRACE] vertex %q: failed expanding dynamic subgraph: %s", dag.VertexName(v), err)
return
}
if g != nil {
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/graph_builder_plan.go new/terraform-1.3.1/internal/terraform/graph_builder_plan.go
--- old/terraform-1.3.0/internal/terraform/graph_builder_plan.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/graph_builder_plan.go 2022-09-28 15:46:33.000000000 +0200
@@ -170,6 +170,10 @@
// TargetsTransformer can determine which nodes to keep in the graph.
&DestroyEdgeTransformer{},
+ &pruneUnusedNodesTransformer{
+ skip: b.Operation != walkPlanDestroy,
+ },
+
// Target
&TargetsTransformer{Targets: b.Targets},
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/node_resource_abstract_instance.go new/terraform-1.3.1/internal/terraform/node_resource_abstract_instance.go
--- old/terraform-1.3.0/internal/terraform/node_resource_abstract_instance.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/node_resource_abstract_instance.go 2022-09-28 15:46:33.000000000 +0200
@@ -777,7 +777,7 @@
// starting values.
// Here we operate on the marked values, so as to revert any changes to the
// marks as well as the value.
- configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal)
+ configValIgnored, ignoreChangeDiags := n.processIgnoreChanges(priorVal, origConfigVal, schema)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return plan, state, keyData, diags
@@ -881,7 +881,7 @@
// providers that we must accommodate the behavior for now, so for
// ignore_changes to work at all on these values, we will revert the
// ignored values once more.
- plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal)
+ plannedNewVal, ignoreChangeDiags = n.processIgnoreChanges(unmarkedPriorVal, plannedNewVal, schema)
diags = diags.Append(ignoreChangeDiags)
if ignoreChangeDiags.HasErrors() {
return plan, state, keyData, diags
@@ -1145,7 +1145,7 @@
return plan, state, keyData, diags
}
-func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value) (cty.Value, tfdiags.Diagnostics) {
+func (n *NodeAbstractResource) processIgnoreChanges(prior, config cty.Value, schema *configschema.Block) (cty.Value, tfdiags.Diagnostics) {
// ignore_changes only applies when an object already exists, since we
// can't ignore changes to a thing we've not created yet.
if prior.IsNull() {
@@ -1158,9 +1158,23 @@
if len(ignoreChanges) == 0 && !ignoreAll {
return config, nil
}
+
if ignoreAll {
- return prior, nil
+ // If we are trying to ignore all attribute changes, we must filter
+ // computed attributes out from the prior state to avoid sending them
+ // to the provider as if they were included in the configuration.
+ ret, _ := cty.Transform(prior, func(path cty.Path, v cty.Value) (cty.Value, error) {
+ attr := schema.AttributeByPath(path)
+ if attr != nil && attr.Computed && !attr.Optional {
+ return cty.NullVal(v.Type()), nil
+ }
+
+ return v, nil
+ })
+
+ return ret, nil
}
+
if prior.IsNull() || config.IsNull() {
// Ignore changes doesn't apply when we're creating for the first time.
// Proposed should never be null here, but if it is then we'll just let it be.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/node_resource_plan.go new/terraform-1.3.1/internal/terraform/node_resource_plan.go
--- old/terraform-1.3.0/internal/terraform/node_resource_plan.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/node_resource_plan.go 2022-09-28 15:46:33.000000000 +0200
@@ -11,10 +11,9 @@
"github.com/hashicorp/terraform/internal/tfdiags"
)
-// nodeExpandPlannableResource handles the first layer of resource
-// expansion. We need this extra layer so DynamicExpand is called twice for
-// the resource, the first to expand the Resource for each module instance, and
-// the second to expand each ResourceInstance for the expanded Resources.
+// nodeExpandPlannableResource represents an addrs.ConfigResource and implements
+// DynamicExpand to a subgraph containing all of the addrs.AbsResourceInstance
+// resulting from both the containing module and resource-specific expansion.
type nodeExpandPlannableResource struct {
*NodeAbstractResource
@@ -53,12 +52,16 @@
_ GraphNodeAttachResourceConfig = (*nodeExpandPlannableResource)(nil)
_ GraphNodeAttachDependencies = (*nodeExpandPlannableResource)(nil)
_ GraphNodeTargetable = (*nodeExpandPlannableResource)(nil)
+ _ graphNodeExpandsInstances = (*nodeExpandPlannableResource)(nil)
)
func (n *nodeExpandPlannableResource) Name() string {
return n.NodeAbstractResource.Name() + " (expand)"
}
+func (n *nodeExpandPlannableResource) expandsInstances() {
+}
+
// GraphNodeAttachDependencies
func (n *nodeExpandPlannableResource) AttachDependencies(deps []addrs.ConfigResource) {
n.dependencies = deps
@@ -90,23 +93,8 @@
expander := ctx.InstanceExpander()
moduleInstances := expander.ExpandModule(n.Addr.Module)
- // Add the current expanded resource to the graph
- for _, module := range moduleInstances {
- resAddr := n.Addr.Resource.Absolute(module)
- g.Add(&NodePlannableResource{
- NodeAbstractResource: n.NodeAbstractResource,
- Addr: resAddr,
- ForceCreateBeforeDestroy: n.ForceCreateBeforeDestroy,
- dependencies: n.dependencies,
- skipRefresh: n.skipRefresh,
- skipPlanChanges: n.skipPlanChanges,
- forceReplace: n.forceReplace,
- })
- }
-
// Lock the state while we inspect it
state := ctx.State().Lock()
- defer ctx.State().Unlock()
var orphans []*states.Resource
for _, res := range state.Resources(n.Addr) {
@@ -117,12 +105,18 @@
break
}
}
- // Address form state was not found in the current config
+ // The module instance of the resource in the state doesn't exist
+ // in the current config, so this whole resource is orphaned.
if !found {
orphans = append(orphans, res)
}
}
+ // We'll no longer use the state directly here, and the other functions
+ // we'll call below may use it so we'll release the lock.
+ state = nil
+ ctx.State().Unlock()
+
// The concrete resource factory we'll use for orphans
concreteResourceOrphan := func(a *NodeAbstractResourceInstance) *NodePlannableResourceInstanceOrphan {
// Add the config and state since we don't do that via transforms
@@ -150,72 +144,68 @@
}
}
- return &g, nil
-}
-
-// NodePlannableResource represents a resource that is "plannable":
-// it is ready to be planned in order to create a diff.
-type NodePlannableResource struct {
- *NodeAbstractResource
-
- Addr addrs.AbsResource
-
- // ForceCreateBeforeDestroy might be set via our GraphNodeDestroyerCBD
- // during graph construction, if dependencies require us to force this
- // on regardless of what the configuration says.
- ForceCreateBeforeDestroy *bool
-
- // skipRefresh indicates that we should skip refreshing individual instances
- skipRefresh bool
-
- // skipPlanChanges indicates we should skip trying to plan change actions
- // for any instances.
- skipPlanChanges bool
-
- // forceReplace are resource instance addresses where the user wants to
- // force generating a replace action. This set isn't pre-filtered, so
- // it might contain addresses that have nothing to do with the resource
- // that this node represents, which the node itself must therefore ignore.
- forceReplace []addrs.AbsResourceInstance
-
- dependencies []addrs.ConfigResource
-}
-
-var (
- _ GraphNodeModuleInstance = (*NodePlannableResource)(nil)
- _ GraphNodeDestroyerCBD = (*NodePlannableResource)(nil)
- _ GraphNodeDynamicExpandable = (*NodePlannableResource)(nil)
- _ GraphNodeReferenceable = (*NodePlannableResource)(nil)
- _ GraphNodeReferencer = (*NodePlannableResource)(nil)
- _ GraphNodeConfigResource = (*NodePlannableResource)(nil)
- _ GraphNodeAttachResourceConfig = (*NodePlannableResource)(nil)
-)
+ // The above dealt with the expansion of the containing module, so now
+ // we need to deal with the expansion of the resource itself across all
+ // instances of the module.
+ //
+ // We'll gather up all of the leaf instances we learn about along the way
+ // so that we can inform the checks subsystem of which instances it should
+ // be expecting check results for, below.
+ var diags tfdiags.Diagnostics
+ instAddrs := addrs.MakeSet[addrs.Checkable]()
+ for _, module := range moduleInstances {
+ resAddr := n.Addr.Resource.Absolute(module)
+ err := n.expandResourceInstances(ctx, resAddr, &g, instAddrs)
+ diags = diags.Append(err)
+ }
+ if diags.HasErrors() {
+ return nil, diags.ErrWithWarnings()
+ }
-func (n *NodePlannableResource) Path() addrs.ModuleInstance {
- return n.Addr.Module
-}
+ // If this is a resource that participates in custom condition checks
+ // (i.e. it has preconditions or postconditions) then the check state
+ // wants to know the addresses of the checkable objects so that it can
+ // treat them as unknown status if we encounter an error before actually
+ // visiting the checks.
+ if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
+ checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, instAddrs)
+ }
-func (n *NodePlannableResource) Name() string {
- return n.Addr.String()
+ return &g, diags.ErrWithWarnings()
}
-// GraphNodeExecutable
-func (n *NodePlannableResource) Execute(ctx EvalContext, op walkOperation) tfdiags.Diagnostics {
+// expandResourceInstances calculates the dynamic expansion for the resource
+// itself in the context of a particular module instance.
+//
+// It has several side-effects:
+// - Adds a node to Graph g for each leaf resource instance it discovers, whether present or orphaned.
+// - Registers the expansion of the resource in the "expander" object embedded inside EvalContext ctx.
+// - Adds each present (non-orphaned) resource instance address to instAddrs (guaranteed to always be addrs.AbsResourceInstance, despite being declared as addrs.Checkable).
+//
+// After calling this for each of the module instances the resource appears
+// within, the caller must register the final superset instAddrs with the
+// checks subsystem so that it knows the fully expanded set of checkable
+// object instances for this resource instance.
+func (n *nodeExpandPlannableResource) expandResourceInstances(globalCtx EvalContext, resAddr addrs.AbsResource, g *Graph, instAddrs addrs.Set[addrs.Checkable]) error {
var diags tfdiags.Diagnostics
if n.Config == nil {
// Nothing to do, then.
- log.Printf("[TRACE] NodeApplyableResource: no configuration present for %s", n.Name())
- return diags
+ log.Printf("[TRACE] nodeExpandPlannableResource: no configuration present for %s", n.Name())
+ return diags.ErrWithWarnings()
}
+ // The rest of our work here needs to know which module instance it's
+ // working in, so that it can evaluate expressions in the appropriate scope.
+ moduleCtx := globalCtx.WithPath(resAddr.Module)
+
// writeResourceState is responsible for informing the expander of what
// repetition mode this resource has, which allows expander.ExpandResource
// to work below.
- moreDiags := n.writeResourceState(ctx, n.Addr)
+ moreDiags := n.writeResourceState(moduleCtx, resAddr)
diags = diags.Append(moreDiags)
if moreDiags.HasErrors() {
- return diags
+ return diags.ErrWithWarnings()
}
// Before we expand our resource into potentially many resource instances,
@@ -223,8 +213,8 @@
// consistent with the repetition mode of the resource. In other words,
// we're aiming to catch a situation where naming a particular resource
// instance would require an instance key but the given address has none.
- expander := ctx.InstanceExpander()
- instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path()))
+ expander := moduleCtx.InstanceExpander()
+ instanceAddrs := expander.ExpandResource(resAddr)
// If there's a number of instances other than 1 then we definitely need
// an index.
@@ -279,60 +269,42 @@
}
}
// NOTE: The actual interpretation of n.forceReplace to produce replace
- // actions is in NodeAbstractResourceInstance.plan, because we must do so
- // on a per-instance basis rather than for the whole resource.
-
- return diags
-}
-
-// GraphNodeDestroyerCBD
-func (n *NodePlannableResource) CreateBeforeDestroy() bool {
- if n.ForceCreateBeforeDestroy != nil {
- return *n.ForceCreateBeforeDestroy
- }
+ // actions is in the per-instance function we're about to call, because
+ // we need to evaluate it on a per-instance basis.
- // If we have no config, we just assume no
- if n.Config == nil || n.Config.Managed == nil {
- return false
+ for _, addr := range instanceAddrs {
+ // If this resource is participating in the "checks" mechanism then our
+ // caller will need to know all of our expanded instance addresses as
+ // checkable object instances.
+ // (NOTE: instAddrs probably already has other instance addresses in it
+ // from earlier calls to this function with different resource addresses,
+ // because its purpose is to aggregate them all together into a single set.)
+ instAddrs.Add(addr)
+ }
+
+ // Our graph builder mechanism expects to always be constructing new
+ // graphs rather than adding to existing ones, so we'll first
+ // construct a subgraph just for this individual module's instances and
+ // then we'll steal all of its nodes and edges to incorporate into our
+ // main graph which contains all of the resource instances together.
+ instG, err := n.resourceInstanceSubgraph(moduleCtx, resAddr, instanceAddrs)
+ if err != nil {
+ diags = diags.Append(err)
+ return diags.ErrWithWarnings()
}
+ g.Subsume(&instG.AcyclicGraph.Graph)
- return n.Config.Managed.CreateBeforeDestroy
-}
-
-// GraphNodeDestroyerCBD
-func (n *NodePlannableResource) ModifyCreateBeforeDestroy(v bool) error {
- n.ForceCreateBeforeDestroy = &v
- return nil
+ return diags.ErrWithWarnings()
}
-// GraphNodeDynamicExpandable
-func (n *NodePlannableResource) DynamicExpand(ctx EvalContext) (*Graph, error) {
+func (n *nodeExpandPlannableResource) resourceInstanceSubgraph(ctx EvalContext, addr addrs.AbsResource, instanceAddrs []addrs.AbsResourceInstance) (*Graph, error) {
var diags tfdiags.Diagnostics
- // Our instance expander should already have been informed about the
- // expansion of this resource and of all of its containing modules, so
- // it can tell us which instance addresses we need to process.
- expander := ctx.InstanceExpander()
- instanceAddrs := expander.ExpandResource(n.ResourceAddr().Absolute(ctx.Path()))
-
// Our graph transformers require access to the full state, so we'll
// temporarily lock it while we work on this.
state := ctx.State().Lock()
defer ctx.State().Unlock()
- // If this is a resource that participates in custom condition checks
- // (i.e. it has preconditions or postconditions) then the check state
- // wants to know the addresses of the checkable objects so that it can
- // treat them as unknown status if we encounter an error before actually
- // visiting the checks.
- if checkState := ctx.Checks(); checkState.ConfigHasChecks(n.NodeAbstractResource.Addr) {
- checkableAddrs := addrs.MakeSet[addrs.Checkable]()
- for _, addr := range instanceAddrs {
- checkableAddrs.Add(addr)
- }
- checkState.ReportCheckableObjects(n.NodeAbstractResource.Addr, checkableAddrs)
- }
-
// The concrete resource factory we'll use
concreteResource := func(a *NodeAbstractResourceInstance) dag.Vertex {
// check if this node is being imported first
@@ -397,7 +369,7 @@
// Add the count/for_each orphans
&OrphanResourceInstanceCountTransformer{
Concrete: concreteResourceOrphan,
- Addr: n.Addr,
+ Addr: addr,
InstanceAddrs: instanceAddrs,
State: state,
},
@@ -418,8 +390,8 @@
// Build the graph
b := &BasicGraphBuilder{
Steps: steps,
- Name: "NodePlannableResource",
+ Name: "nodeExpandPlannableResource",
}
- graph, diags := b.Build(ctx.Path())
+ graph, diags := b.Build(addr.Module)
return graph, diags.ErrWithWarnings()
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/node_resource_plan_test.go new/terraform-1.3.1/internal/terraform/node_resource_plan_test.go
--- old/terraform-1.3.0/internal/terraform/node_resource_plan_test.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/node_resource_plan_test.go 1970-01-01 01:00:00.000000000 +0100
@@ -1,63 +0,0 @@
-package terraform
-
-import (
- "testing"
-
- "github.com/hashicorp/terraform/internal/addrs"
- "github.com/hashicorp/terraform/internal/configs"
- "github.com/hashicorp/terraform/internal/instances"
- "github.com/hashicorp/terraform/internal/states"
-)
-
-func TestNodePlannableResourceExecute(t *testing.T) {
- state := states.NewState()
- ctx := &MockEvalContext{
- StateState: state.SyncWrapper(),
- InstanceExpanderExpander: instances.NewExpander(),
- }
-
- t.Run("no config", func(t *testing.T) {
- node := NodePlannableResource{
- NodeAbstractResource: &NodeAbstractResource{
- Config: nil,
- },
- Addr: mustAbsResourceAddr("test_instance.foo"),
- }
- diags := node.Execute(ctx, walkApply)
- if diags.HasErrors() {
- t.Fatalf("unexpected error: %s", diags.Err())
- }
- if !state.Empty() {
- t.Fatalf("expected no state, got:\n %s", state.String())
- }
- })
-
- t.Run("simple", func(t *testing.T) {
-
- node := NodePlannableResource{
- NodeAbstractResource: &NodeAbstractResource{
- Config: &configs.Resource{
- Mode: addrs.ManagedResourceMode,
- Type: "test_instance",
- Name: "foo",
- },
- ResolvedProvider: addrs.AbsProviderConfig{
- Provider: addrs.NewDefaultProvider("test"),
- Module: addrs.RootModule,
- },
- },
- Addr: mustAbsResourceAddr("test_instance.foo"),
- }
- diags := node.Execute(ctx, walkApply)
- if diags.HasErrors() {
- t.Fatalf("unexpected error: %s", diags.Err())
- }
- if state.Empty() {
- t.Fatal("expected resources in state, got empty state")
- }
- r := state.Resource(mustAbsResourceAddr("test_instance.foo"))
- if r == nil {
- t.Fatal("test_instance.foo not found in state")
- }
- })
-}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf new/terraform-1.3.1/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf
--- old/terraform-1.3.0/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/testdata/plan-ignore-changes-wildcard/main.tf 2022-09-28 15:46:33.000000000 +0200
@@ -5,6 +5,7 @@
resource "aws_instance" "foo" {
ami = "${var.foo}"
instance = "${var.bar}"
+ foo = "bar"
lifecycle {
ignore_changes = all
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/transform_destroy_edge.go new/terraform-1.3.1/internal/terraform/transform_destroy_edge.go
--- old/terraform-1.3.0/internal/terraform/transform_destroy_edge.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/transform_destroy_edge.go 2022-09-28 15:46:33.000000000 +0200
@@ -39,6 +39,63 @@
// still subnets.
type DestroyEdgeTransformer struct{}
+// tryInterProviderDestroyEdge checks if we're inserting a destroy edge
+// across a provider boundary, and only adds the edge if it results in no cycles.
+//
+// FIXME: The cycles can arise in valid configurations when a provider depends
+// on resources from another provider. In the future we may want to inspect
+// the dependencies of the providers themselves, to avoid needing to use the
+// blunt hammer of checking for cycles.
+//
+// A reduced example of this dependency problem looks something like:
+/*
+
+createA <- createB
+ | \ / |
+ | providerB <- |
+ v \ v
+destroyA -------------> destroyB
+
+*/
+//
+// The edge from destroyA to destroyB would be skipped in this case, but there
+// are still other combinations of changes which could connect the A and B
+// groups around providerB in various ways.
+//
+// The most difficult problem here happens during a full destroy operation.
+// That creates a special case where resources on which a provider depends must
+// exist for evaluation before they are destroyed. This means that any provider
+// dependencies must wait until all that provider's resources have first been
+// destroyed. This is where these cross-provider edges are still required to
+// ensure the correct order.
+func (t *DestroyEdgeTransformer) tryInterProviderDestroyEdge(g *Graph, from, to dag.Vertex) {
+ e := dag.BasicEdge(from, to)
+ g.Connect(e)
+
+ pc, ok := from.(GraphNodeProviderConsumer)
+ if !ok {
+ return
+ }
+ fromProvider := pc.Provider()
+
+ pc, ok = to.(GraphNodeProviderConsumer)
+ if !ok {
+ return
+ }
+ toProvider := pc.Provider()
+
+ sameProvider := fromProvider.Equals(toProvider)
+
+ // Check for cycles, and back out the edge if there are any.
+ // The cycles we are looking for only appears between providers, so don't
+ // waste time checking for cycles if both nodes use the same provider.
+ if !sameProvider && len(g.Cycles()) > 0 {
+ log.Printf("[DEBUG] DestroyEdgeTransformer: skipping inter-provider edge %s->%s which creates a cycle",
+ dag.VertexName(from), dag.VertexName(to))
+ g.RemoveEdge(e)
+ }
+}
+
func (t *DestroyEdgeTransformer) Transform(g *Graph) error {
// Build a map of what is being destroyed (by address string) to
// the list of destroyers.
@@ -93,7 +150,7 @@
for _, desDep := range destroyersByResource[resAddr.String()] {
if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(desDep, des) {
log.Printf("[TRACE] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(desDep), dag.VertexName(des))
- g.Connect(dag.BasicEdge(desDep, des))
+ t.tryInterProviderDestroyEdge(g, desDep, des)
} else {
log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(desDep), dag.VertexName(des))
}
@@ -105,7 +162,7 @@
for _, createDep := range creators[resAddr.String()] {
if !graphNodesAreResourceInstancesInDifferentInstancesOfSameModule(createDep, des) {
log.Printf("[DEBUG] DestroyEdgeTransformer: %s has stored dependency of %s\n", dag.VertexName(createDep), dag.VertexName(des))
- g.Connect(dag.BasicEdge(createDep, des))
+ t.tryInterProviderDestroyEdge(g, createDep, des)
} else {
log.Printf("[TRACE] DestroyEdgeTransformer: skipping %s => %s inter-module-instance dependency\n", dag.VertexName(createDep), dag.VertexName(des))
}
@@ -170,9 +227,18 @@
// closers also need to disable their use of expansion if the module itself is
// no longer present.
type pruneUnusedNodesTransformer struct {
+ // The plan graph builder will skip this transformer except during a full
+ // destroy. Planning normally involves all nodes, but during a destroy plan
+ // we may need to prune things which are in the configuration but do not
+ // exist in state to evaluate.
+ skip bool
}
func (t *pruneUnusedNodesTransformer) Transform(g *Graph) error {
+ if t.skip {
+ return nil
+ }
+
// We need a reverse depth first walk of modules, processing them in order
// from the leaf modules to the root. This allows us to remove unneeded
// dependencies from child modules, freeing up nodes in the parent module
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/transform_expand.go new/terraform-1.3.1/internal/terraform/transform_expand.go
--- old/terraform-1.3.0/internal/terraform/transform_expand.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/transform_expand.go 2022-09-28 15:46:33.000000000 +0200
@@ -5,5 +5,13 @@
// These nodes are given the eval context and are expected to return
// a new subgraph.
type GraphNodeDynamicExpandable interface {
+ // DynamicExpand returns a new graph which will be treated as the dynamic
+ // subgraph of the receiving node.
+ //
+ // The second return value is of type error for historical reasons;
+ // it's valid (and most ideal) for DynamicExpand to return the result
+ // of calling ErrWithWarnings on a tfdiags.Diagnostics value instead,
+ // in which case the caller will unwrap it and gather the individual
+ // diagnostics.
DynamicExpand(EvalContext) (*Graph, error)
}
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/internal/terraform/transform_root.go new/terraform-1.3.1/internal/terraform/transform_root.go
--- old/terraform-1.3.0/internal/terraform/transform_root.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/internal/terraform/transform_root.go 2022-09-28 15:46:33.000000000 +0200
@@ -15,11 +15,21 @@
return nil
}
- // Add a root
+ // We intentionally add a graphNodeRoot value -- rather than a pointer to
+ // one -- so that all root nodes will coalesce together if two graphs
+ // are merged. Each distinct node value can only be in a graph once,
+ // so adding another graphNodeRoot value to the same graph later will
+ // be a no-op and all of the edges from root nodes will coalesce together
+ // under Graph.Subsume.
+ //
+ // It's important to retain this coalescing guarantee under future
+ // maintenance.
var root graphNodeRoot
g.Add(root)
- // Connect the root to all the edges that need it
+ // We initially make the root node depend on every node except itself.
+ // If the caller subsequently runs transitive reduction on the graph then
+ // it's typical for some of these edges to then be removed.
for _, v := range g.Vertices() {
if v == root {
continue
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/version/version.go new/terraform-1.3.1/version/version.go
--- old/terraform-1.3.0/version/version.go 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/version/version.go 2022-09-28 15:46:33.000000000 +0200
@@ -11,7 +11,7 @@
)
// The main version number that is being run at the moment.
-var Version = "1.3.0"
+var Version = "1.3.1"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/website/docs/cli/workspaces/index.mdx new/terraform-1.3.1/website/docs/cli/workspaces/index.mdx
--- old/terraform-1.3.0/website/docs/cli/workspaces/index.mdx 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/website/docs/cli/workspaces/index.mdx 2022-09-28 15:46:33.000000000 +0200
@@ -7,72 +7,80 @@
# Managing Workspaces
-In Terraform CLI, _workspaces_ are separate instances of
-[state data](/language/state) that can be used from the same working
-directory. You can use workspaces to manage multiple non-overlapping groups of
-resources with the same configuration.
-
-- Every [initialized working directory](/cli/init) has at least
- one workspace. (If you haven't created other workspaces, it is a workspace
- named `default`.)
-- For a given working directory, only one workspace can be _selected_ at a time.
-- Most Terraform commands (including [provisioning](/cli/run)
- and [state manipulation](/cli/state) commands) only interact
- with the currently selected workspace.
-- Use [the `terraform workspace select` command](/cli/commands/workspace/select)
- to change the currently selected workspace.
-- Use the [`terraform workspace list`](/cli/commands/workspace/list),
- [`terraform workspace new`](/cli/commands/workspace/new), and
- [`terraform workspace delete`](/cli/commands/workspace/delete) commands
- to manage the available workspaces in the current working directory.
-
--> **Note:** Terraform Cloud and Terraform CLI both have features called
-"workspaces," but they're slightly different. Terraform Cloud's workspaces
-behave more like completely separate working directories.
-
-## The Purpose of Workspaces
-
-Since most of the resources you can manage with Terraform don't include a unique
-name as part of their configuration, it's common to use the same Terraform
-configuration to provision multiple groups of similar resources.
-
-Terraform relies on [state](/language/state) to associate resources with
-real-world objects, so if you run the same configuration multiple times with
-completely separate state data, Terraform can manage many non-overlapping groups
-of resources. In some cases you'll want to change
-[variable values](/language/values/variables) for these different
-resource collections (like when specifying differences between staging and
-production deployments), and in other cases you might just want many instances
-of a particular infrastructure pattern.
-
-The simplest way to maintain multiple instances of a configuration with
-completely separate state data is to use multiple
-[working directories](/cli/init) (with different
-[backend](/language/settings/backends/configuration) configurations per directory, if you
-aren't using the default `local` backend).
-
-However, this isn't always the most _convenient_ way to handle separate states.
-Terraform installs a separate cache of plugins and modules for each working
-directory, so maintaining multiple directories can waste bandwidth and disk
-space. You must also update your configuration code from version control
-separately for each directory, reinitialize each directory separately when
-changing the configuration, etc.
-
-Workspaces allow you to use the same working copy of your configuration and the
-same plugin and module caches, while still keeping separate states for each
-collection of resources you manage.
+Workspaces in the Terraform CLI refer to separate instances of [state data](/language/state) inside the same Terraform working directory. They are distinctly different from [workspaces in Terraform Cloud](/cloud-docs/workspaces), which each have their own Terraform configuration and function as separate working directories.
+
+Terraform relies on state to associate resources with real-world objects. When you run the same configuration multiple times with separate state data, Terraform can manage multiple sets of non-overlapping resources.
+
+Workspaces can be helpful for specific [use cases](#use-cases), but they are not required to use the Terraform CLI. We recommend using [alternative approaches](#alternatives-to-workspaces) for complex deployments requiring separate credentials and access controls.
+
+
+## Managing CLI Workspaces
+
+Every [initialized working directory](/cli/init) starts with one workspace named `default`.
+
+Use the [`terraform workspace list`](/cli/commands/workspace/list), [`terraform workspace new`](/cli/commands/workspace/new), and [`terraform workspace delete`](/cli/commands/workspace/delete) commands to manage the available workspaces in the current working directory.
+
+Use [the `terraform workspace select` command](/cli/commands/workspace/select) to change the currently selected workspace. For a given working directory, you can only select one workspace at a time. Most Terraform commands only interact with the currently selected workspace. This includes [provisioning](/cli/run) and [state manipulation](/cli/state).
+
+When you provision infrastructure in each workspace, you usually need to manually specify different [input variables](/language/values/variables) to differentiate each collection. For example, you might deploy test infrastructure to a different region.
+
+
+## Use Cases
+
+You can create multiple [working directories](/cli/init) to maintain multiple instances of a configuration with completely separate state data. However, Terraform installs a separate cache of plugins and modules for each working directory, so maintaining multiple directories can waste bandwidth and disk space. This approach also requires extra tasks like updating configuration from version control for each directory separately and reinitializing each directory when you change the configuration. Workspaces are convenient because they let you create different sets of infrastructure with the same working copy of your configuration and the same plugin and module caches.
+
+A common use for multiple workspaces is to create a parallel, distinct copy of
+a set of infrastructure to test a set of changes before modifying production infrastructure.
+
+Non-default workspaces are often related to feature branches in version control.
+The default workspace might correspond to the `main` or `trunk` branch, which describes the intended state of production infrastructure. When a developer creates a feature branch for a change, they might also create a corresponding workspace and deploy into it a temporary copy of the main infrastructure. They can then test changes on the copy without affecting the production infrastructure. Once the change is merged and deployed to the default workspace, they destroy the test infrastructure and delete the temporary workspace.
+
+
+### When Not to Use Multiple Workspaces
+
+Workspaces let you quickly switch between multiple instances of a **single configuration** within its **single backend**. They are not designed to solve all problems.
+
+When using Terraform to manage larger systems, you should create separate Terraform configurations that correspond to architectural boundaries within the system. This lets teams manage different components separately. Workspaces alone are not a suitable tool for system decomposition because each subsystem should have its own separate configuration and backend.
+
+In particular, organizations commonly want to create a strong separation
+between multiple deployments of the same infrastructure serving different
+development stages or different internal teams. In this case, the backend for each deployment often has different credentials and access controls. CLI workspaces within a working directory use the same backend, so they are not a suitable isolation mechanism for this scenario.
+
+## Alternatives to Workspaces
+
+Instead of creating CLI workspaces, you can use one or more [re-usable modules](/language/modules/develop) to represent the common elements and then represent each instance as a separate configuration that instantiates those common elements in the context of a different [backend](/language/settings/backends/configuration). The root module of each configuration consists only of a backend configuration and a small number of `module` blocks with arguments describing any small differences between the deployments.
+
+When multiple configurations represent distinct system components rather than multiple deployments, you can pass data from one component to another using paired resources types and data sources.
+
+- When a shared [Consul](https://www.consul.io/) cluster is available, use [`consul_key_prefix`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/resources/key_prefix) to publish to the key/value store and [`consul_keys`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/keys) to retrieve those values in other configurations.
+
+- In systems that support user-defined labels or tags, use a tagging convention to make resources automatically discoverable. For example, use [the `aws_vpc` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc) to assign suitable tags and then [the `aws_vpc` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc) to query by those tags in other configurations.
+
+- For server addresses, use a provider-specific resource to create a DNS record with a predictable name. Then you can either use that name directly or use [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) to retrieve the published addresses in other configurations.
+
+- If you store a Terraform state for one configuration in a remote backend that other configurations can access, then the other configurations can use [`terraform_remote_state`](/language/state/remote-state-data) to directly consume its root module outputs. This setup creates a tighter coupling between configurations, and the root configuration does not need to publish its results in a separate system.
+
## Interactions with Terraform Cloud Workspaces
Terraform Cloud organizes infrastructure using workspaces, but its workspaces
-act more like completely separate working directories; each Terraform Cloud
+act more like completely separate working directories. Each Terraform Cloud
workspace has its own Terraform configuration, set of variable values, state
data, run history, and settings.
-These two kinds of workspaces are different, but related. When [using Terraform
-CLI as a frontend for Terraform Cloud](/cli/cloud), you can associate the current working
-directory with one or more remote workspaces. If you associate the
-directory with multiple workspaces (using workspace tags), you can use the
-`terraform workspace` commands to select which remote workspace to use.
+When you [integrate Terraform CLI with Terraform Cloud](/cli/cloud), you can associate the current CLI working directory with one or more remote Terraform Cloud workspaces. Then, use the `terraform workspace` commands to select the remote workspace you want to use for each run.
+
+Refer to [CLI-driven Runs](/cloud-docs/run/cli) in the Terraform Cloud documentation for more details.
+
+
+## Workspace Internals
+
+Workspaces are technically equivalent to renaming your state file. Terraform then includes a set of protections and support for remote state.
+
+Workspaces are also meant to be a shared resource. They are not private, unless you use purely local state and do not commit your state to version control.
+
+For local state, Terraform stores the workspace states in a directory called `terraform.tfstate.d`. This directory should be treated similarly to local-only `terraform.tfstate`. Some teams commit these files to version control, but we recommend using a remote backend instead when there are multiple collaborators.
+
+For [remote state](/language/state/remote), the workspaces are stored directly in the configured [backend](/language/settings/backends). For example, if you use [Consul](/language/settings/backends/consul), the workspaces are stored by appending the workspace name to the state path. To ensure that workspace names are stored correctly and safely in all backends, the name must be valid to use in a URL path segment without escaping.
-Refer to [CLI-driven Runs](/cloud-docs/run/cli) in the Terraform Cloud documentation for more details about using Terraform CLI with Terraform Cloud.
+Terraform stores the current workspace name locally in the ignored `.terraform` directory. This allows multiple team members to work on different workspaces concurrently. Workspace names are also attached to associated remote workspaces in Terraform Cloud. For more details about workspace names in Terraform Cloud, refer to the [CLI Integration (recommended)](/cli/cloud/settings#arguments) and [remote backend](/language/settings/backends/remote#workspaces) documentation.
\ No newline at end of file
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/website/docs/language/functions/startswith.mdx new/terraform-1.3.1/website/docs/language/functions/startswith.mdx
--- old/terraform-1.3.0/website/docs/language/functions/startswith.mdx 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/website/docs/language/functions/startswith.mdx 2022-09-28 15:46:33.000000000 +0200
@@ -1,5 +1,5 @@
---
-page_title: startsswith - Functions - Configuration Language
+page_title: startswith - Functions - Configuration Language
description: |-
The startswith function takes two values: a string to check and a prefix string. It returns true if the string begins with that exact prefix.
---
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/terraform-1.3.0/website/docs/language/state/workspaces.mdx new/terraform-1.3.1/website/docs/language/state/workspaces.mdx
--- old/terraform-1.3.0/website/docs/language/state/workspaces.mdx 2022-09-21 15:40:32.000000000 +0200
+++ new/terraform-1.3.1/website/docs/language/state/workspaces.mdx 2022-09-28 15:46:33.000000000 +0200
@@ -7,71 +7,39 @@
# Workspaces
-Each Terraform configuration has an associated [backend](/language/settings/backends)
-that defines how operations are executed and where persistent data such as
-[the Terraform state](/language/state/purpose) are
-stored.
-
-The persistent data stored in the backend belongs to a _workspace_. Initially
-the backend has only one workspace, called "default", and thus there is only
-one Terraform state associated with that configuration.
-
-Certain backends support _multiple_ named workspaces, allowing multiple states
-to be associated with a single configuration. The configuration still
-has only one backend, but multiple distinct instances of that configuration
-to be deployed without configuring a new backend or changing authentication
+Each Terraform configuration has an associated [backend](/language/settings/backends) that defines how Terraform executes operations and where Terraform stores persistent data, like [state](/language/state/purpose).
+
+The persistent data stored in the backend belongs to a workspace. The backend initially has only one workspace containing one Terraform state associated with that configuration. Some backends support multiple named workspaces, allowing multiple states to be associated with a single configuration. The configuration still has only one backend, but you can deploy multiple distinct instances of that configuration without configuring a new backend or changing authentication
credentials.
-Multiple workspaces are currently supported by the following backends:
+-> **Note**: The Terraform CLI workspaces are different from [workspaces in Terraform Cloud](/cloud-docs/workspaces). Refer to [Initializing and Migrating](/cli/cloud/migrating) for details about migrating a configuration with multiple workspaces to Terraform Cloud.
+
+## Backends Supporting Multiple Workspaces
+
+You can use multiple workspaces with the following backends:
+
+- [AzureRM](/language/settings/backends/azurerm)
+- [Consul](/language/settings/backends/consul)
+- [COS](/language/settings/backends/cos)
+- [GCS](/language/settings/backends/gcs)
+- [Kubernetes](/language/settings/backends/kubernetes)
+- [Local](/language/settings/backends/local)
+- [OSS](/language/settings/backends/oss)
+- [Postgres](/language/settings/backends/pg)
+- [Remote](/language/settings/backends/remote)
+- [S3](/language/settings/backends/s3)
-* [AzureRM](/language/settings/backends/azurerm)
-* [Consul](/language/settings/backends/consul)
-* [COS](/language/settings/backends/cos)
-* [GCS](/language/settings/backends/gcs)
-* [Kubernetes](/language/settings/backends/kubernetes)
-* [Local](/language/settings/backends/local)
-* [OSS](/language/settings/backends/oss)
-* [Postgres](/language/settings/backends/pg)
-* [Remote](/language/settings/backends/remote)
-* [S3](/language/settings/backends/s3)
-
-In the 0.9 line of Terraform releases, this concept was known as "environment".
-It was renamed in 0.10 based on feedback about confusion caused by the
-overloading of the word "environment" both within Terraform itself and within
-organizations that use Terraform.
-
--> **Note**: The Terraform CLI workspace concept described in this document is
-different from but related to the Terraform Cloud
-[workspace](/cloud-docs/workspaces) concept.
-If you use multiple Terraform CLI workspaces in a single Terraform configuration
-and are migrating that configuration to Terraform Cloud, refer to [Initializing and Migrating](/cli/cloud/migrating).
## Using Workspaces
-Terraform starts with a single workspace named "default". This
-workspace is special both because it is the default and also because
-it cannot ever be deleted. If you've never explicitly used workspaces, then
-you've only ever worked on the "default" workspace.
-
-Workspaces are managed with the `terraform workspace` set of commands. To
-create a new workspace and switch to it, you can use `terraform workspace new`;
-to switch workspaces you can use `terraform workspace select`; etc.
-
-For example, creating a new workspace:
-
-```text
-$ terraform workspace new bar
-Created and switched to workspace "bar"!
-
-You're now on a new, empty workspace. Workspaces isolate their state,
-so if you run "terraform plan" Terraform will not see any existing state
-for this configuration.
-```
+~> **Important:** Workspaces are not appropriate for system decomposition or deployments requiring separate credentials and access controls. Refer to [Use Cases](/cli/workspaces#use-cases) in the Terraform CLI documentation for details and recommended alternatives.
+
+Terraform starts with a single, default workspace named `default` that you cannot delete. If you have not created a new workspace, you are using the default workspace in your Terraform working directory.
+
+When you run `terraform plan` in a new workspace, Terraform does not access existing resources in other workspaces. These resources still physically exist, but you must switch workspaces to manage them.
+
+Refer to the [Terraform CLI workspaces](/cli/workspaces) documentation for full details about how to create and use workspaces.
-As the command says, if you run `terraform plan`, Terraform will not see
-any existing resources that existed on the default (or any other) workspace.
-**These resources still physically exist,** but are managed in another
-Terraform workspace.
## Current Workspace Interpolation
@@ -103,103 +71,3 @@
# ... other arguments
}
```
-
-## When to use Multiple Workspaces
-
-Named workspaces allow conveniently switching between multiple instances of
-a _single_ configuration within its _single_ backend. They are convenient in
-a number of situations, but cannot solve all problems.
-
-A common use for multiple workspaces is to create a parallel, distinct copy of
-a set of infrastructure in order to test a set of changes before modifying the
-main production infrastructure. For example, a developer working on a complex
-set of infrastructure changes might create a new temporary workspace in order
-to freely experiment with changes without affecting the default workspace.
-
-Non-default workspaces are often related to feature branches in version control.
-The default workspace might correspond to the "main" or "trunk" branch,
-which describes the intended state of production infrastructure. When a
-feature branch is created to develop a change, the developer of that feature
-might create a corresponding workspace and deploy into it a temporary "copy"
-of the main infrastructure so that changes can be tested without affecting
-the production infrastructure. Once the change is merged and deployed to the
-default workspace, the test infrastructure can be destroyed and the temporary
-workspace deleted.
-
-When Terraform is used to manage larger systems, teams should use multiple
-separate Terraform configurations that correspond with suitable architectural
-boundaries within the system so that different components can be managed
-separately and, if appropriate, by distinct teams. Workspaces _alone_
-are not a suitable tool for system decomposition, because each subsystem should
-have its own separate configuration and backend, and will thus have its own
-distinct set of workspaces.
-
-In particular, organizations commonly want to create a strong separation
-between multiple deployments of the same infrastructure serving different
-development stages (e.g. staging vs. production) or different internal teams.
-In this case, the backend used for each deployment often belongs to that
-deployment, with different credentials and access controls. Named workspaces
-are _not_ a suitable isolation mechanism for this scenario.
-
-Instead, use one or more [re-usable modules](/language/modules/develop) to
-represent the common elements, and then represent each instance as a separate
-configuration that instantiates those common elements in the context of a
-different backend. In that case, the root module of each configuration will
-consist only of a backend configuration and a small number of `module` blocks
-whose arguments describe any small differences between the deployments.
-
-Where multiple configurations are representing distinct system components
-rather than multiple deployments, data can be passed from one component to
-another using paired resources types and data sources. For example:
-
-* Where a shared [Consul](https://www.consul.io/) cluster is available, use
- [`consul_key_prefix`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/resources/key_prefix) to
- publish to the key/value store and [`consul_keys`](https://registry.terraform.io/providers/hashicorp/consul/latest/docs/data-sources/keys)
- to retrieve those values in other configurations.
-
-* In systems that support user-defined labels or tags, use a tagging convention
- to make resources automatically discoverable. For example, use
- [the `aws_vpc` resource type](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/vpc)
- to assign suitable tags and then
- [the `aws_vpc` data source](https://registry.terraform.io/providers/hashicorp/aws/latest/docs/data-sources/vpc)
- to query by those tags in other configurations.
-
-* For server addresses, use a provider-specific resource to create a DNS
- record with a predictable name and then either use that name directly or
- use [the `dns` provider](https://registry.terraform.io/providers/hashicorp/dns/latest/docs) to retrieve
- the published addresses in other configurations.
-
-* If a Terraform state for one configuration is stored in a remote backend
- that is accessible to other configurations then
- [`terraform_remote_state`](/language/state/remote-state-data)
- can be used to directly consume its root module outputs from those other
- configurations. This creates a tighter coupling between configurations,
- but avoids the need for the "producer" configuration to explicitly
- publish its results in a separate system.
-
-## Workspace Internals
-
-Workspaces are technically equivalent to renaming your state file. They
-aren't any more complex than that. Terraform wraps this simple notion with
-a set of protections and support for remote state.
-
-For local state, Terraform stores the workspace states in a directory called
-`terraform.tfstate.d`. This directory should be treated similarly to
-local-only `terraform.tfstate`; some teams commit these files to version
-control, although using a remote backend instead is recommended when there are
-multiple collaborators.
-
-For [remote state](/language/state/remote), the workspaces are stored
-directly in the configured [backend](/language/settings/backends). For example, if you
-use [Consul](/language/settings/backends/consul), the workspaces are stored
-by appending the workspace name to the state path. To ensure that
-workspace names are stored correctly and safely in all backends, the name
-must be valid to use in a URL path segment without escaping.
-
-The important thing about workspace internals is that workspaces are
-meant to be a shared resource. They aren't a private, local-only notion
-(unless you're using purely local state and not committing it).
-
-The "current workspace" name is stored locally in the ignored
-`.terraform` directory. This allows multiple team members to work on
-different workspaces concurrently. Workspace names are also attached to associated remote workspaces in Terraform Cloud. For more details about workspace names in Terraform Cloud, refer to the [remote backend](/language/settings/backends/remote#workspaces) and [CLI Integration (recommended)](/cli/cloud/settings#arguments) documentation.
++++++ terraform.obsinfo ++++++
--- /var/tmp/diff_new_pack.6a1BDT/_old 2022-09-30 17:58:52.521387248 +0200
+++ /var/tmp/diff_new_pack.6a1BDT/_new 2022-09-30 17:58:52.525387257 +0200
@@ -1,5 +1,5 @@
name: terraform
-version: 1.3.0
-mtime: 1663767632
-commit: 5c239ecd6ac3a183bb4852940f2f2d1af1a766ce
+version: 1.3.1
+mtime: 1664372793
+commit: ebc1f295cafb53341f9b58c3c16099f98d3b58a6
++++++ vendor.tar.gz ++++++
/work/SRC/openSUSE:Factory/terraform/vendor.tar.gz /work/SRC/openSUSE:Factory/.terraform.new.2275/vendor.tar.gz differ: char 5, line 1
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package testssl.sh for openSUSE:Factory checked in at 2022-09-30 17:58:27
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/testssl.sh (Old)
and /work/SRC/openSUSE:Factory/.testssl.sh.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "testssl.sh"
Fri Sep 30 17:58:27 2022 rev:9 rq:1007161 version:3.0.8
Changes:
--------
--- /work/SRC/openSUSE:Factory/testssl.sh/testssl.sh.changes 2022-08-30 14:49:03.536055959 +0200
+++ /work/SRC/openSUSE:Factory/.testssl.sh.new.2275/testssl.sh.changes 2022-09-30 17:58:47.721376987 +0200
@@ -1,0 +2,27 @@
+Wed Sep 28 20:54:50 UTC 2022 - Jeff Kowalczyk <jkowalczyk(a)suse.com>
+
+- Update to version 3.0.8
+ * Fix grep 3.8 warnings on fgrep and unneeded escapes of hyphen, slash, space (Geert)
+ * Fix alignment for cipher output (David)
+ * New binaries (Darwin from Barry) now carry the appendix -bad and fix a security problem.
+ * Backport from higher OpenSSL version to support xmpp-server
+ * Fix CT (David)
+ * Fix decryption of TLS 1.3 response (David)
+ * Upgrade Dockerfile to Alpine to 3.15
+ * Fix pretty JSON formatting when warning is issued (David)
+ * Update of certificate stores
+ * Major update of client simulation (9 new simulations , >4 removed in default run)
+ * Fix CRIME output on servers only supporting TLS 1.3 (Tomasz)
+ * Fix censys link
+ * Fix some handshake problems with $OPENSSL ciphers, extend determine_optimal_sockets_params() to more
+ * ciphers, fix PROTOS_OFFERED (David)
+ * Relax STARTTLS FTP requirement so that it doesn't require TLS after AUTH
+ * Fix run_server_preference() with no default protocol (David)
+ * Fix getting CRL / NO_SESSION_ID under some circumstances (David)
+ * Improve/fix OpenSSL 3.0 compatibility (David)
+ * Fix formatting to documentation
+ * Add FFDHE groups to supported_groups (David)
+ * Include RSA-PSS in ClientHello (David)
+- Requires: bind-utils for required tools dig, host and nslookup
+
+-------------------------------------------------------------------
Old:
----
testssl.sh-3.0.7.tar.gz
New:
----
testssl.sh-3.0.8.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ testssl.sh.spec ++++++
--- /var/tmp/diff_new_pack.bdKJz1/_old 2022-09-30 17:58:49.513380818 +0200
+++ /var/tmp/diff_new_pack.bdKJz1/_new 2022-09-30 17:58:49.521380835 +0200
@@ -20,7 +20,7 @@
%define _data_dir_name testssl-sh
Name: testssl.sh
-Version: 3.0.7
+Version: 3.0.8
Release: 0
Summary: Testing TLS/SSL Encryption Anywhere On Any Port
License: GPL-2.0-or-later
@@ -30,6 +30,7 @@
Source1: %{name}-rpmlintrc
Patch0: testssl.sh-2.9.95-set-install-dir.patch
Requires: bash >= 3.2
+Requires: bind-utils
Requires: openssl
BuildArch: noarch
++++++ testssl.sh-3.0.7.tar.gz -> testssl.sh-3.0.8.tar.gz ++++++
/work/SRC/openSUSE:Factory/testssl.sh/testssl.sh-3.0.7.tar.gz /work/SRC/openSUSE:Factory/.testssl.sh.new.2275/testssl.sh-3.0.8.tar.gz differ: char 13, line 1
1
0
Script 'mail_helper' called by obssrc
Hello community,
here is the log from the commit of package kcov for openSUSE:Factory checked in at 2022-09-30 17:58:26
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/kcov (Old)
and /work/SRC/openSUSE:Factory/.kcov.new.2275 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "kcov"
Fri Sep 30 17:58:26 2022 rev:5 rq:1007170 version:40
Changes:
--------
--- /work/SRC/openSUSE:Factory/kcov/kcov.changes 2022-06-28 15:23:14.074019615 +0200
+++ /work/SRC/openSUSE:Factory/.kcov.new.2275/kcov.changes 2022-09-30 17:58:46.865375158 +0200
@@ -1,0 +2,30 @@
+Thu Sep 22 19:04:42 UTC 2022 - Antoine Belvire <antoine.belvire(a)opensuse.org>
+
+- Update to 40:
+ * Make libbfd non-required to avoid GPLv3
+- Changes from 39:
+ * Allow an address to map to multiple lines in the binary
+ (gh#SimonKagstrom/kcov/366).
+ * Correct abort signal return value (gh#SimonKagstrom/kcov/314).
+ * Escape json URLs (gh#SimonKagstrom/kcov/353).
+ * Match ELF perfect if it has the ELF magic
+ (gh#SimonKagstrom/kcov/339).
+ * Correct --collect-only, --report-only on non-identical
+ machines (gh#SimonKagstrom/kcov/342)
+ * Auto build via github actions (gh#SimonKagstrom/kcov/347).
+ * Allow regex in --replace-src-path (gh#SimonKagstrom/kcov/333).
+ * bash: Set PS4 in bash_helper for modern bashes to allow root
+ usage (gh#SimonKagstrom/kcov/331).
+ * Fix --merge with two --collect-only runs
+ (gh#SimonKagstrom/kcov/327).
+ * bash: Add --bash-tracefd-cloexec to avoid leaking trace fd
+ to children (gh#SimonKagstrom/kcov/325).
+ * bash: Drain stdout when on xtrace-fd
+ (gh#SimonKagstrom/kcov/323).
+ * Make kcov conan-ready (gh#SimonKagstrom/kcov/322).
+- Add kcov-40-binutils-2.39.patch: Fix build with binutils 2.39
+ (gh#SimonKagstrom/kcov#381, gh#SimonKagstrom/kcov!383).
+- Refresh spec file.
+- Remove unneeded build dependency on python3.
+
+-------------------------------------------------------------------
Old:
----
v38.tar.gz
New:
----
kcov-40-binutils-2.39.patch
v40.tar.gz
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences:
------------------
++++++ kcov.spec ++++++
--- /var/tmp/diff_new_pack.6lvkKp/_old 2022-09-30 17:58:47.365376226 +0200
+++ /var/tmp/diff_new_pack.6lvkKp/_new 2022-09-30 17:58:47.369376235 +0200
@@ -17,7 +17,7 @@
Name: kcov
-Version: 38
+Version: 40
Release: 0
Summary: Code coverage tool without special compilation options
License: GPL-2.0-only
@@ -25,14 +25,17 @@
URL: https://github.com/SimonKagstrom/kcov
Source0: https://github.com/SimonKagstrom/kcov/archive/v%{version}.tar.gz
Patch0: link_order.patch
+# PATCH-FIX-UPSTREAM kcov-40-binutils-2.39.patch -- Fix build with binutils 2.39 (gh#SimonKagstrom/kcov#381, gh#SimonKagstrom/kcov!383)
+Patch1: kcov-40-binutils-2.39.patch
BuildRequires: binutils-devel
+BuildRequires: c++_compiler
BuildRequires: cmake
-BuildRequires: gcc-c++
-BuildRequires: libcurl-devel
-BuildRequires: libdw-devel
-BuildRequires: libelf-devel
-BuildRequires: python3
-BuildRequires: zlib-devel
+BuildRequires: pkgconfig
+BuildRequires: pkgconfig(libcurl)
+BuildRequires: pkgconfig(libdw)
+BuildRequires: pkgconfig(libelf)
+BuildRequires: pkgconfig(openssl)
+BuildRequires: pkgconfig(zlib)
ExcludeArch: s390x
%description
@@ -42,25 +45,24 @@
long-running applications.
%prep
-%setup -q
-%patch0 -p1
+%autosetup -p1
# remove LLDB headers bundled for MacOS
rm -frv external/
%build
%cmake
-%make_build
+%cmake_build
%install
-cd build
-%make_install
+%cmake_install
+# ignore ChangeLog and COPYING*, they are handled with doc and license macros
+rm -r %{buildroot}%{_datadir}/doc/kcov
%files
-%doc ChangeLog README
+%doc ChangeLog README.md
%license COPYING*
-%{_bindir}/*
-%{_mandir}/man1/*
-# ignore ChangeLog and COPYING* files from install
-%exclude %{_datadir}/doc/kcov
+%{_bindir}/kcov
+%{_bindir}/kcov-system-daemon
+%{_mandir}/man1/kcov.1%{?ext_man}
%changelog
++++++ kcov-40-binutils-2.39.patch ++++++
From fd1a4fd2f02cee49afd74e427e38c61b89154582 Mon Sep 17 00:00:00 2001
From: oreo639 <oreo6391(a)gmail.com>
Date: Wed, 14 Sep 2022 16:02:17 -0700
Subject: [PATCH] Fix build with binutils 2.39
---
src/CMakeLists.txt | 20 +++++++++++++++++++-
src/parsers/bfd-disassembler.cc | 23 +++++++++++++++++++++++
2 files changed, 42 insertions(+), 1 deletion(-)
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 3b751852..fc396827 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -94,6 +94,7 @@ set (DISASSEMBLER_SRCS
)
set (HAS_LIBBFD "0")
+set (HAS_LIBBFD_DISASM_STYLED "0")
if (CMAKE_TARGET_ARCHITECTURES STREQUAL "i386" OR CMAKE_TARGET_ARCHITECTURES STREQUAL "x86_64")
if (LIBBFD_FOUND)
@@ -106,6 +107,23 @@ if (CMAKE_TARGET_ARCHITECTURES STREQUAL "i386" OR CMAKE_TARGET_ARCHITECTURES STR
${LIBBFD_BFD_LIBRARY}
${LIBBFD_IBERTY_LIBRARY}
)
+ include(CheckCSourceCompiles)
+ set(CMAKE_REQUIRED_LIBRARIES ${DISASSEMBLER_LIBRARIES})
+ check_c_source_compiles("
+ #define PACKAGE
+ #define PACKAGE_VERSION
+ #include <stdio.h>
+ #include <dis-asm.h>
+
+ int main(int argc, char **argv){
+ struct disassemble_info info;
+ init_disassemble_info(&info, stdout, NULL, NULL);
+ return 0;
+ }
+ " TEST_LIBBFD_DISASM_STYLED)
+ if (TEST_LIBBFD_DISASM_STYLED)
+ set (HAS_LIBBFD_DISASM_STYLED "1")
+ endif (TEST_LIBBFD_DISASM_STYLED)
endif (LIBBFD_FOUND)
endif (CMAKE_TARGET_ARCHITECTURES STREQUAL "i386" OR CMAKE_TARGET_ARCHITECTURES STREQUAL "x86_64")
@@ -284,7 +302,7 @@ set (KCOV_SYSTEM_MODE_SRCS
set (KCOV_LIBRARY_PREFIX "/tmp")
-set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -g -Wall -D_GLIBCXX_USE_NANOSLEEP -DKCOV_LIBRARY_PREFIX=${KCOV_LIBRARY_PREFIX} -DKCOV_HAS_LIBBFD=${HAS_LIBBFD}")
+set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x -g -Wall -D_GLIBCXX_USE_NANOSLEEP -DKCOV_LIBRARY_PREFIX=${KCOV_LIBRARY_PREFIX} -DKCOV_HAS_LIBBFD=${HAS_LIBBFD} -DKCOV_LIBFD_DISASM_STYLED=${HAS_LIBBFD_DISASM_STYLED}")
include_directories(
include/
diff --git a/src/parsers/bfd-disassembler.cc b/src/parsers/bfd-disassembler.cc
index 43653ee0..28815961 100644
--- a/src/parsers/bfd-disassembler.cc
+++ b/src/parsers/bfd-disassembler.cc
@@ -75,7 +75,11 @@ class BfdDisassembler : public IDisassembler
BfdDisassembler()
{
memset(&m_info, 0, sizeof(m_info));
+#if KCOV_LIBFD_DISASM_STYLED
+ init_disassemble_info(&m_info, (void *)this, BfdDisassembler::opcodesFprintFuncStatic, BfdDisassembler::opcodesFprintStyledFuncStatic);
+#else
init_disassemble_info(&m_info, (void *)this, BfdDisassembler::opcodesFprintFuncStatic);
+#endif
m_disassembler = print_insn_i386;
m_info.arch = bfd_arch_i386;
@@ -407,6 +411,25 @@ class BfdDisassembler : public IDisassembler
return out;
}
+#if KCOV_LIBFD_DISASM_STYLED
+ static int opcodesFprintStyledFuncStatic(void *info, enum disassembler_style style, const char *fmt, ...)
+ {
+ (void)style;
+ BfdDisassembler *pThis = (BfdDisassembler *)info;
+ char str[64];
+ int out;
+
+ va_list args;
+ va_start (args, fmt);
+ out = vsnprintf( str, sizeof(str) - 1, fmt, args );
+ va_end (args);
+
+ pThis->opcodesFprintFunc(str);
+
+ return out;
+ }
+#endif
+
typedef std::map<uint64_t, Section *> SectionCache_t;
typedef std::unordered_map<uint64_t, Instruction> InstructionAddressMap_t;
typedef std::map<uint64_t, Instruction *> InstructionOrderedMap_t;
++++++ v38.tar.gz -> v40.tar.gz ++++++
++++ 1884 lines of diff (skipped)
1
0