Script 'mail_helper' called by obssrc

Hello community,
here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2022-12-30 11:08:50 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ Comparing /work/SRC/openSUSE:Factory/crmsh (Old) and /work/SRC/openSUSE:Factory/.crmsh.new.1563 (New) ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Package is "crmsh"
Fri Dec 30 11:08:50 2022 rev:274 rq:1045770 version:4.4.1+20221228.326c28fd
Changes: -------- --- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes 2022-12-27 11:55:12.703364570 +0100 +++ /work/SRC/openSUSE:Factory/.crmsh.new.1563/crmsh.changes 2022-12-30 11:09:05.937305235 +0100 @@ -1,0 +2,15 @@ +Wed Dec 28 09:46:54 UTC 2022 - XLiang@suse.com + +- Update to version 4.4.1+20221228.326c28fd: + * Dev: report: 'crm report' to collect journal.log in microseconds + * Dev: doc: Add deprecated note for 'crm configure erase' + * Dev: ui_configure: Deprecate configure erase sub-command + +------------------------------------------------------------------- +Wed Dec 28 04:00:43 UTC 2022 - XLiang@suse.com + +- Update to version 4.4.1+20221228.7c16362c: + * Dev: unittest: Adjust unit test for previous changes + * Dev: ui_cluster: Improve cluster start/stop INFO + +-------------------------------------------------------------------
Old: ---- crmsh-4.4.1+20221227.bd7d326e.tar.bz2
New: ---- crmsh-4.4.1+20221228.326c28fd.tar.bz2
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Other differences: ------------------ ++++++ crmsh.spec ++++++ --- /var/tmp/diff_new_pack.sO4leb/_old 2022-12-30 11:09:06.445308271 +0100 +++ /var/tmp/diff_new_pack.sO4leb/_new 2022-12-30 11:09:06.449308296 +0100 @@ -36,7 +36,7 @@ Summary: High Availability cluster command-line interface License: GPL-2.0-or-later Group: %{pkg_group} -Version: 4.4.1+20221227.bd7d326e +Version: 4.4.1+20221228.326c28fd Release: 0 URL: http://crmsh.github.io Source0: %{name}-%{version}.tar.bz2
++++++ _servicedata ++++++ --- /var/tmp/diff_new_pack.sO4leb/_old 2022-12-30 11:09:06.497308582 +0100 +++ /var/tmp/diff_new_pack.sO4leb/_new 2022-12-30 11:09:06.497308582 +0100 @@ -9,7 +9,7 @@ </service> <service name="tar_scm"> <param name="url">https://github.com/ClusterLabs/crmsh.git</param> - <param name="changesrevision">bd7d326e7b0c684c3f93a95851474fcb263a5689</param> + <param name="changesrevision">326c28fd1b60f5e1a0c71006e799989869431a75</param> </service> </servicedata> (No newline at EOF)
++++++ crmsh-4.4.1+20221227.bd7d326e.tar.bz2 -> crmsh-4.4.1+20221228.326c28fd.tar.bz2 ++++++ diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221227.bd7d326e/crmsh/report/utillib.py new/crmsh-4.4.1+20221228.326c28fd/crmsh/report/utillib.py --- old/crmsh-4.4.1+20221227.bd7d326e/crmsh/report/utillib.py 2022-12-27 09:04:26.000000000 +0100 +++ new/crmsh-4.4.1+20221228.326c28fd/crmsh/report/utillib.py 2022-12-28 10:33:36.000000000 +0100 @@ -342,7 +342,7 @@
logger.debug("journalctl from: '%d' until: '%d' from_time: '%s' to_time: '%s' > %s", from_t, to_t, from_time, to_time, outf) - cmd = 'journalctl -o short-iso --since "%s" --until "%s" --no-pager | tail -n +2' % \ + cmd = 'journalctl -o short-iso-precise --since "%s" --until "%s" --no-pager | tail -n +2' % \ (from_time, to_time) crmutils.str2file(get_command_info(cmd)[1], outf)
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221227.bd7d326e/crmsh/ui_cluster.py new/crmsh-4.4.1+20221228.326c28fd/crmsh/ui_cluster.py --- old/crmsh-4.4.1+20221227.bd7d326e/crmsh/ui_cluster.py 2022-12-27 09:04:26.000000000 +0100 +++ new/crmsh-4.4.1+20221228.326c28fd/crmsh/ui_cluster.py 2022-12-28 10:33:36.000000000 +0100 @@ -90,7 +90,7 @@ @command.skill_level('administrator') def do_start(self, context, *args): ''' - Starts the cluster services on all nodes or specific node(s) + Starts the cluster stack on all nodes or specific node(s) ''' service_check_list = ["pacemaker.service"] start_qdevice = False @@ -101,7 +101,7 @@ node_list = parse_option_for_nodes(context, *args) for node in node_list[:]: if all([utils.service_is_active(srv, remote_addr=node) for srv in service_check_list]): - logger.info("Cluster services already started on {}".format(node)) + logger.info("The cluster stack already started on {}".format(node)) node_list.remove(node) if not node_list: return @@ -112,25 +112,25 @@ if start_qdevice: qdevice.QDevice.check_qdevice_vote() for node in node_list: - logger.info("Cluster services started on {}".format(node)) + logger.info("The cluster stack started on {}".format(node))
@command.skill_level('administrator') def do_stop(self, context, *args): ''' - Stops the cluster services on all nodes or specific node(s) + Stops the cluster stack on all nodes or specific node(s) ''' node_list = parse_option_for_nodes(context, *args) for node in node_list[:]: if not utils.service_is_active("corosync.service", remote_addr=node): if utils.service_is_active("sbd.service", remote_addr=node): utils.stop_service("corosync", remote_addr=node) - logger.info("Cluster services stopped on {}".format(node)) + logger.info("The cluster stack stopped on {}".format(node)) else: - logger.info("Cluster services already stopped on {}".format(node)) + logger.info("The cluster stack already stopped on {}".format(node)) node_list.remove(node) elif not utils.service_is_active("pacemaker.service", remote_addr=node): utils.stop_service("corosync", remote_addr=node) - logger.info("Cluster services stopped on {}".format(node)) + logger.info("The cluster stack stopped on {}".format(node)) node_list.remove(node) if not node_list: return @@ -158,12 +158,12 @@ node_list = utils.stop_service("corosync", node_list=node_list)
for node in node_list: - logger.info("Cluster services stopped on {}".format(node)) + logger.info("The cluster stack stopped on {}".format(node))
@command.skill_level('administrator') def do_restart(self, context, *args): ''' - Restarts the cluster services on all nodes or specific node(s) + Restarts the cluster stack on all nodes or specific node(s) ''' parse_option_for_nodes(context, *args) self.do_stop(context, *args) diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221227.bd7d326e/crmsh/ui_configure.py new/crmsh-4.4.1+20221228.326c28fd/crmsh/ui_configure.py --- old/crmsh-4.4.1+20221227.bd7d326e/crmsh/ui_configure.py 2022-12-27 09:04:26.000000000 +0100 +++ new/crmsh-4.4.1+20221228.326c28fd/crmsh/ui_configure.py 2022-12-28 10:33:36.000000000 +0100 @@ -866,6 +866,9 @@ @command.completers(compl.choice(['nodes'])) def do_erase(self, context, nodes=None): "usage: erase [nodes]" + if not options.regression_tests: + logger.warning("`crm configure erase` is deprecated. The replacement could be `crm cluster remove [node]`") + return True cib_factory.ensure_cib_updated() if nodes is None: return cib_factory.erase() diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221227.bd7d326e/doc/crm.8.adoc new/crmsh-4.4.1+20221228.326c28fd/doc/crm.8.adoc --- old/crmsh-4.4.1+20221227.bd7d326e/doc/crm.8.adoc 2022-12-27 09:04:26.000000000 +0100 +++ new/crmsh-4.4.1+20221228.326c28fd/doc/crm.8.adoc 2022-12-28 10:33:36.000000000 +0100 @@ -2904,6 +2904,12 @@ [[cmdhelp_configure_erase,erase the CIB]] ==== `erase`
+.Deprecation note +**************************** +`crm configure erase` is deprecated. +The replacement could be `crm cluster remove [node]` +**************************** + The `erase` clears all configuration. Apart from nodes. To remove nodes, you have to specify an additional keyword `nodes`.
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-4.4.1+20221227.bd7d326e/test/unittests/test_ui_cluster.py new/crmsh-4.4.1+20221228.326c28fd/test/unittests/test_ui_cluster.py --- old/crmsh-4.4.1+20221227.bd7d326e/test/unittests/test_ui_cluster.py 2022-12-27 09:04:26.000000000 +0100 +++ new/crmsh-4.4.1+20221228.326c28fd/test/unittests/test_ui_cluster.py 2022-12-28 10:33:36.000000000 +0100 @@ -52,8 +52,8 @@ mock.call("pacemaker.service", remote_addr="node2") ]) mock_info.assert_has_calls([ - mock.call("Cluster services already started on node1"), - mock.call("Cluster services already started on node2") + mock.call("The cluster stack already started on node1"), + mock.call("The cluster stack already started on node2") ])
@mock.patch('crmsh.qdevice.QDevice.check_qdevice_vote') @@ -78,7 +78,7 @@ ]) mock_start.assert_called_once_with("corosync-qdevice", node_list=["node1"]) mock_qdevice_configured.assert_called_once_with() - mock_info.assert_called_once_with("Cluster services started on node1") + mock_info.assert_called_once_with("The cluster stack started on node1")
@mock.patch('logging.Logger.info') @mock.patch('crmsh.utils.service_is_active') @@ -92,7 +92,7 @@ mock.call("corosync.service", remote_addr="node1"), mock.call("sbd.service", remote_addr="node1") ]) - mock_info.assert_called_once_with("Cluster services already stopped on node1") + mock_info.assert_called_once_with("The cluster stack already stopped on node1")
@mock.patch('logging.Logger.debug') @mock.patch('logging.Logger.info') @@ -126,6 +126,6 @@ mock.call("corosync-qdevice.service", node_list=["node1"]), mock.call("corosync", node_list=["node1"]) ]) - mock_info.assert_called_once_with("Cluster services stopped on node1") + mock_info.assert_called_once_with("The cluster stack stopped on node1") mock_debug.assert_called_once_with("Quorum is lost; Set enable_quorum_fencing=0 and enable_quorum_lockspace=0 for dlm") mock_check.assert_called_once_with(mock_get_dc, wait_timeout=25)