commit crmsh for openSUSE:Factory
Script 'mail_helper' called by obssrc

Hello community,

here is the log from the commit of package crmsh for openSUSE:Factory checked in at 2024-08-05 17:21:57
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Comparing /work/SRC/openSUSE:Factory/crmsh (Old)
 and      /work/SRC/openSUSE:Factory/.crmsh.new.7232 (New)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Package is "crmsh"

Mon Aug  5 17:21:57 2024 rev:340 rq:1191561 version:5.0.0+20240731.2abacf27

Changes:
--------
--- /work/SRC/openSUSE:Factory/crmsh/crmsh.changes      2024-07-22 17:15:48.948027982 +0200
+++ /work/SRC/openSUSE:Factory/.crmsh.new.7232/crmsh.changes    2024-08-05 17:22:56.815234377 +0200
@@ -1,0 +2,18 @@
+Wed Jul 31 06:54:32 UTC 2024 - XLiang@suse.com
+
+- Update to version 5.0.0+20240731.2abacf27:
+  * Dev: crm_rpmcheck: use ansible to get package versions
+  * Fix: ui_context: enter_level() should not check requirements for non-functional subcommands
+
+-------------------------------------------------------------------
+Tue Jul 23 00:59:47 UTC 2024 - XLiang@suse.com
+
+- Update to version 5.0.0+20240723.68039940:
+  * Dev: prun: replace deprecated stdlib API asyncio.get_event_loop()
+  * unused code removal
+  * Dev: ui_cluster: refactor Cluster._wait_for_dc()
+  * Dev: utils: revert previous changes to get_dc()
+  * Dev: utils: rename wait4dc to wait_dc_stable
+  * DC lost during wait
+
+-------------------------------------------------------------------

Old:
----
  crmsh-5.0.0+20240718.3877db63.tar.bz2

New:
----
  crmsh-5.0.0+20240731.2abacf27.tar.bz2

++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

Other differences:
------------------
++++++ crmsh.spec ++++++
--- /var/tmp/diff_new_pack.HVYvzn/_old  2024-08-05 17:22:57.455260622 +0200
+++ /var/tmp/diff_new_pack.HVYvzn/_new  2024-08-05 17:22:57.455260622 +0200
@@ -36,7 +36,7 @@
 Summary:        High Availability cluster command-line interface
 License:        GPL-2.0-or-later
 Group:          %{pkg_group}
-Version:        5.0.0+20240718.3877db63
+Version:        5.0.0+20240731.2abacf27
 Release:        0
 URL:            http://crmsh.github.io
 Source0:        %{name}-%{version}.tar.bz2

++++++ _servicedata ++++++
--- /var/tmp/diff_new_pack.HVYvzn/_old  2024-08-05 17:22:57.511262919 +0200
+++ /var/tmp/diff_new_pack.HVYvzn/_new  2024-08-05 17:22:57.511262919 +0200
@@ -9,7 +9,7 @@
 </service>
 <service name="tar_scm">
   <param name="url">https://github.com/ClusterLabs/crmsh.git</param>
-  <param name="changesrevision">3877db6363f74e6f7d0d8a94fd8b6aa63880cb61</param>
+  <param name="changesrevision">2abacf273fa8804ad537d8338127b93e30bf2a49</param>
 </service>
</servicedata>
(No newline at EOF)

++++++ crmsh-5.0.0+20240718.3877db63.tar.bz2 -> crmsh-5.0.0+20240731.2abacf27.tar.bz2 ++++++

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/prun/runner.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/prun/runner.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/prun/runner.py     2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/prun/runner.py     2024-07-31 08:38:07.000000000 +0200
@@ -55,7 +55,7 @@
         )
         if timeout_seconds > 0:
             awaitable = self._timeout_limit(timeout_seconds, awaitable)
-        return asyncio.get_event_loop().run_until_complete(awaitable)
+        return asyncio.get_event_loop_policy().get_event_loop().run_until_complete(awaitable)

     async def _timeout_limit(self, timeout_seconds: int, awaitable: typing.Awaitable):
         assert timeout_seconds > 0
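For background on the prun/runner.py hunk above: asyncio.get_event_loop() is deprecated in recent Python releases when called from synchronous code with no loop running, so the commit routes the lookup through the event loop policy instead. A minimal sketch of that pattern, with illustrative names (run_sync and greet are not crmsh functions):

    import asyncio

    async def greet() -> str:
        await asyncio.sleep(0.1)
        return "hello"

    def run_sync(awaitable):
        # Going through the policy avoids the DeprecationWarning that
        # asyncio.get_event_loop() emits when no loop is running, while
        # still reusing the thread's loop across calls (asyncio.run()
        # would create and close a fresh loop every time).
        loop = asyncio.get_event_loop_policy().get_event_loop()
        return loop.run_until_complete(awaitable)

    print(run_sync(greet()))  # prints "hello"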
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_cluster.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_cluster.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_cluster.py      2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_cluster.py      2024-07-31 08:38:07.000000000 +0200
@@ -225,22 +225,6 @@
         return True

     @staticmethod
-    def _wait_for_dc(node=None):
-        """
-        Wait for the cluster's DC to become available
-        """
-        if not ServiceManager().service_is_active("pacemaker.service", remote_addr=node):
-            return
-
-        dc_deadtime = utils.get_property("dc-deadtime", peer=node) or str(constants.DC_DEADTIME_DEFAULT)
-        dc_timeout = int(dc_deadtime.strip('s')) + 5
-        try:
-            utils.check_function_with_timeout(utils.get_dc, wait_timeout=dc_timeout, peer=node)
-        except TimeoutError:
-            logger.error("No DC found currently, please wait if the cluster is still starting")
-            raise utils.TerminateSubCommand
-
-    @staticmethod
     def _set_dlm(node=None):
         """
         When dlm running and quorum is lost, before stop cluster service, should set
@@ -261,7 +245,7 @@
             return
         logger.debug(f"stop node list: {node_list}")

-        self._wait_for_dc(node_list[0])
+        utils.wait_for_dc(node_list[0])
         self._set_dlm(node_list[0])

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_configure.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_configure.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_configure.py    2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_configure.py    2024-07-31 08:38:07.000000000 +0200
@@ -862,7 +862,7 @@
         argl = [x for x in argl if x not in ('-f', '--force')]
         if arg_force or config.core.force:
             if self._stop_if_running(argl) > 0:
-                utils.wait4dc(what="Stopping %s" % (", ".join(argl)))
+                utils.wait_dc_stable(what="Stopping %s" % (", ".join(argl)))
                 cib_factory.ensure_cib_updated()
         return cib_factory.delete(*argl)

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_context.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_context.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_context.py      2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_context.py      2024-07-31 08:38:07.000000000 +0200
@@ -16,7 +16,7 @@
 logger = log.setup_logger(__name__)
 logger_utils = log.LoggerUtils(logger)

-_NON_FUNCTIONAL_COMMANDS = {'help', 'cd', 'ls', 'quit', 'up'}
+_NON_FUNCTIONAL_COMMANDS = {'help', 'ls'}
 _NON_FUNCTIONAL_OPTIONS = {'--help', '--help-without-redirect'}

 class Context(object):
@@ -104,7 +104,7 @@

         # wait for dc if wait flag set
         if self._wait_for_dc:
-            return utils.wait4dc(self.command_name, not options.batch)
+            return utils.wait_dc_stable(self.command_name, not options.batch)
         return rv

     def complete(self, line):
@@ -257,8 +257,10 @@
         self._in_transit = True

         entry = level()
-        if 'requires' in dir(entry) and not entry.requires():
-            self.fatal_error("Missing requirements")
+        if self.command_name not in _NON_FUNCTIONAL_COMMANDS \
+                and all(arg not in _NON_FUNCTIONAL_OPTIONS for arg in self.command_args):
+            if 'requires' in dir(entry) and not entry.requires():
+                self.fatal_error("Missing requirements")
         self.stack.append(entry)
         self.clear_readline_cache()
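The ui_context.py hunk above trims _NON_FUNCTIONAL_COMMANDS down to the commands that never touch the cluster and skips the level's requires() check for them, so help-style invocations keep working on a host where a level's prerequisites are missing. A standalone sketch of that gating pattern; the class and command names below are made up, only the gating logic mirrors the hunk:

    _NON_FUNCTIONAL_COMMANDS = {'help', 'ls'}
    _NON_FUNCTIONAL_OPTIONS = {'--help', '--help-without-redirect'}

    class SomeLevel:
        def requires(self) -> bool:
            # e.g. verify that required external binaries are installed
            return False  # pretend a prerequisite is missing

    def enter_level(level, command_name, command_args):
        entry = level()
        # Enforce prerequisites only for commands that actually do work;
        # help/navigation must stay usable on a misconfigured host.
        if command_name not in _NON_FUNCTIONAL_COMMANDS \
                and all(arg not in _NON_FUNCTIONAL_OPTIONS for arg in command_args):
            if 'requires' in dir(entry) and not entry.requires():
                raise SystemExit("Missing requirements")
        return entry

    enter_level(SomeLevel, 'help', [])     # ok: 'help' bypasses the check
    # enter_level(SomeLevel, 'show', [])   # would exit: requirements enforced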
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_history.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_history.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_history.py      2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_history.py      2024-07-31 08:38:07.000000000 +0200
@@ -158,7 +158,7 @@
     def do_latest(self, context):
         "usage: latest"
         self._init_source()
-        if not utils.wait4dc("transition", not options.batch):
+        if not utils.wait_dc_stable("transition", not options.batch):
             return False
         self._set_source("live")
         crm_report().refresh_source()

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_resource.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_resource.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/ui_resource.py     2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/ui_resource.py     2024-07-31 08:38:07.000000000 +0200
@@ -325,7 +325,7 @@
         logger.info("ordering %s to stop", ", ".join(resources))
         if not self._commit_meta_attrs(context, resources, "target-role", "Stopped"):
             return False
-        if not utils.wait4dc("stop", not options.batch):
+        if not utils.wait_dc_stable("stop", not options.batch):
             return False
         logger.info("ordering %s to start", ", ".join(resources))
         return self._commit_meta_attrs(context, resources, "target-role", "Started")
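In the utils.py diff that follows, the new wait_for_dc() derives its deadline from the cluster's dc-deadtime property via crm_msec(dc_deadtime) // 1000 + 5, i.e. the pacemaker-style duration string is converted to milliseconds first. A simplified sketch of that kind of conversion; to_msec below is an illustrative stand-in, not crmsh's crm_msec():

    import re

    def to_msec(value: str, default_msec: int = 20000) -> int:
        # Accepts e.g. "20", "20s", "500ms", "2min"; bare numbers mean seconds.
        m = re.fullmatch(r"\s*(\d+)\s*(ms|msec|s|sec|m|min|h|hr)?\s*", value)
        if not m:
            return default_msec
        factor = {"ms": 1, "msec": 1, "s": 1000, "sec": 1000,
                  "m": 60000, "min": 60000, "h": 3600000, "hr": 3600000}
        return int(m.group(1)) * factor[m.group(2) or "s"]

    # A dc-deadtime of "20s" then yields a 25-second wait deadline:
    dc_timeout = to_msec("20s") // 1000 + 5
    print(dc_timeout)  # 25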
diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/crmsh/utils.py new/crmsh-5.0.0+20240731.2abacf27/crmsh/utils.py
--- old/crmsh-5.0.0+20240718.3877db63/crmsh/utils.py   2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/crmsh/utils.py   2024-07-31 08:38:07.000000000 +0200
@@ -1,9 +1,8 @@
 # Copyright (C) 2008-2011 Dejan Muhamedagic <dmuhamedagic@suse.de>
 # See COPYING for license information.
-
+import asyncio
 import errno
 import os
-import socket
 import sys
 import typing
 from tempfile import mkstemp
@@ -14,7 +13,6 @@
 import datetime
 import shutil
 import shlex
-import bz2
 import fnmatch
 import gc
 import ipaddress
@@ -90,6 +88,11 @@
     return len(iterable) - iterable[::-1].index(value) - 1


+def raise_exception(e):
+    # a wrapper for raising an exception in lambda function
+    raise e
+
+
 def memoize(function):
     "Decorator to invoke a function once only for any argument"
     memoized = {}
@@ -999,7 +1002,21 @@
     return out.split()[-1]


-def wait4dc(what="", show_progress=True):
+def wait_for_dc(node: str = None):
+    """
+    Wait for the cluster's DC to become available
+    """
+    if not ServiceManager().service_is_active("pacemaker.service", remote_addr=node):
+        raise ValueError("Pacemaker is not running. No DC.")
+    dc_deadtime = get_property("dc-deadtime", peer=node) or str(constants.DC_DEADTIME_DEFAULT)
+    dc_timeout = crm_msec(dc_deadtime) // 1000 + 5
+    try:
+        return retry_with_timeout(lambda: get_dc(node) or raise_exception(Exception()), dc_timeout)
+    except TimeoutError:
+        raise ValueError("No DC found currently, please wait if the cluster is still starting")
+
+
+def wait_dc_stable(what="", show_progress=True):
     '''
     Wait for the DC to get into the S_IDLE state. This should be
     invoked only after a CIB modification which would exercise
@@ -1022,8 +1039,8 @@

     There's no timeout, as we expect the DC to eventually becomes idle.
     '''
-    dc = get_dc()
-    if not dc:
+
+    if not wait_for_dc():
         logger.warning("can't find DC")
         return False
     cmd = "crm_attribute -Gq -t crm_config -n crmd-transition-delay 2> /dev/null"
@@ -1039,7 +1056,7 @@
     max_sleep = 1.00
     sleep_time = init_sleep
     while True:
-        dc = get_dc()
+        dc = wait_for_dc()
         if not dc:
             logger.warning("DC lost during wait")
             return False
@@ -2920,20 +2937,22 @@
     return rc


-def check_function_with_timeout(check_function, wait_timeout=30, interval=1, *args, **kwargs):
+def retry_with_timeout(callable, timeout_sec: float, interval_sec=1):
+    """Try callable repeatedly until it returns without raising an exception.
+
+    Return the return value of callable,
+    or raises TimeoutError if it does not return a value after retrying for timeout_sec.
+
+    The callable runs in the calling thread and should not block for a long time.
     """
-    Run check_function in a loop
-    Return when check_function is true
-    Raise TimeoutError when timeout
-    """
-    current_time = int(time.time())
-    timeout = current_time + wait_timeout
-    while current_time <= timeout:
-        if check_function(*args, **kwargs):
-            return
-        time.sleep(interval)
-        current_time = int(time.time())
-    raise TimeoutError
+    async def wrapper():
+        while True:
+            try:
+                return callable()
+            except Exception:
+                pass
+            await asyncio.sleep(interval_sec)
+    return asyncio.get_event_loop_policy().get_event_loop().run_until_complete(asyncio.wait_for(wrapper(), timeout_sec))


 def fetch_cluster_node_list_from_node(init_node):

diff -urN '--exclude=CVS' '--exclude=.cvsignore' '--exclude=.svn' '--exclude=.svnignore' old/crmsh-5.0.0+20240718.3877db63/test/unittests/test_ui_cluster.py new/crmsh-5.0.0+20240731.2abacf27/test/unittests/test_ui_cluster.py
--- old/crmsh-5.0.0+20240718.3877db63/test/unittests/test_ui_cluster.py        2024-07-18 11:09:03.000000000 +0200
+++ new/crmsh-5.0.0+20240731.2abacf27/test/unittests/test_ui_cluster.py        2024-07-31 08:38:07.000000000 +0200
@@ -80,7 +80,7 @@
         mock_qdevice_configured.assert_called_once_with()
         mock_info.assert_called_once_with("The cluster stack started on node1")

-    @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+    @mock.patch('crmsh.utils.wait_for_dc')
     @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
     @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
     def test_do_stop_return(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc):
@@ -98,7 +98,7 @@
     @mock.patch('logging.Logger.info')
     @mock.patch('crmsh.ui_cluster.ServiceManager')
     @mock.patch('crmsh.ui_cluster.Cluster._set_dlm')
-    @mock.patch('crmsh.ui_cluster.Cluster._wait_for_dc')
+    @mock.patch('crmsh.utils.wait_for_dc')
     @mock.patch('crmsh.ui_cluster.Cluster._node_ready_to_stop_cluster_service')
     @mock.patch('crmsh.ui_cluster.parse_option_for_nodes')
     def test_do_stop(self, mock_parse_nodes, mock_node_ready_to_stop_cluster_service, mock_dc,
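The retry_with_timeout() replacement above turns the old poll-until-true loop into retry-until-it-stops-raising, with the overall deadline enforced by asyncio.wait_for(). A self-contained sketch of the same pattern, using a made-up probe() in place of a real check such as get_dc():

    import asyncio
    import random

    def probe() -> str:
        # Made-up check: raises until the simulated resource shows up.
        if random.random() < 0.7:
            raise RuntimeError("not ready yet")
        return "resource-ready"

    def retry_with_timeout(fn, timeout_sec: float, interval_sec: float = 0.1):
        async def wrapper():
            while True:
                try:
                    return fn()          # success: propagate the value
                except Exception:
                    await asyncio.sleep(interval_sec)  # failure: retry
        # wait_for() raises TimeoutError once the deadline passes.
        return asyncio.run(asyncio.wait_for(wrapper(), timeout_sec))

    print(retry_with_timeout(probe, timeout_sec=5.0))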
""" - if os.path.isfile('/bin/rpm'): + if shutil.which('ansible'): + rc, data = ansible_package_data(pkg) + if rc == 0: + return data + + if shutil.which('rpm'): return rpm_package_data(pkg) - if os.path.isfile('/usr/bin/dpkg'): + if shutil.which('dpkg'): return dpkg_package_data(pkg) return {'name': pkg, 'error': "unknown package manager"} +_packages = None +def ansible_package_data(pkg) -> tuple[int, dict]: + """ + Gathers version and release information about a package. + Using ansible. + """ + global _packages + if not _packages: + # if _packages is None, then get it + rc, out, err = run(['ansible', '-m', 'package_facts', 'localhost']) + if rc == -1: + return -1, {} + # output format 'localhost | SUCCESS => { json...' + bracket_pos = out.find('{') + if bracket_pos == -1: + return -1, {} + is_ok = out[:bracket_pos].find('SUCCESS =>') + if is_ok == -1: + return -1, {} + + # get the json part + out = out[bracket_pos:] + json_tree = json.loads(out) + # get _packages + _packages = json_tree['ansible_facts']['packages'] + + if pkg not in _packages: + return 0, {'name': pkg, 'error': "package not installed"} + else: + return 0, _packages[pkg][0] + def rpm_package_data(pkg): """