From cd8bc9a283ce62a533d8584b633d1b26c4a79eee Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 19 Aug 2025 11:16:01 -0700
Subject: [PATCH 1/6] Bump requests from 2.32.4 to 2.32.5 (#105)
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 3a5f1874..ce47c360 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -205,7 +205,7 @@ referencing==0.36.2
# ansible-lint
# jsonschema
# jsonschema-specifications
-requests==2.32.4
+requests==2.32.5
# via
# -r requirements.in
# azure-core
From d76cf8e517f02146c31286702c2276810057a860 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 3 Sep 2025 11:06:42 -0700
Subject: [PATCH 2/6] Bump pandas from 2.3.1 to 2.3.2 (#106)
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index ce47c360..703e1107 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -144,7 +144,7 @@ packaging==25.0
# ansible-runner
# black
# pytest
-pandas==2.3.1
+pandas==2.3.2
# via -r requirements.in
pathspec==0.12.1
# via
From 52cfb89896ff7eb9dcbb288cc29f6299426d066a Mon Sep 17 00:00:00 2001
From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
Date: Tue, 30 Sep 2025 16:39:53 +0000
Subject: [PATCH 3/6] Updated logic for HA DB/SCS cluster configuration
validation (#114)
---
.gitignore | 3 +
requirements.txt | 47 +-
scripts/setup.sh | 16 +-
src/ansible.cfg | 2 +-
src/module_utils/get_pcmk_properties.py | 391 ++++++---
src/module_utils/sap_automation_qa.py | 2 +-
src/modules/get_pcmk_properties_db.py | 50 +-
src/modules/get_pcmk_properties_scs.py | 78 +-
.../ha_db_hana/tasks/files/constants.yaml | 813 +++++++++++++-----
src/roles/ha_scs/tasks/files/constants.yaml | 747 ++++++++++++----
src/templates/report.html | 4 +-
src/vars/input-api.yaml | 4 +-
.../module_utils/get_pcmk_properties_test.py | 141 ++-
tests/modules/get_pcmk_properties_db_test.py | 98 ++-
tests/modules/get_pcmk_properties_scs_test.py | 135 +--
15 files changed, 1776 insertions(+), 755 deletions(-)
diff --git a/.gitignore b/.gitignore
index 82d7c8f1..1bd7e322 100644
--- a/.gitignore
+++ b/.gitignore
@@ -409,3 +409,6 @@ VMWPASSWORD
.coverage.*
*.ini
.ansible/
+
+# GitHub Copilot configuration files
+copilot-instructions.md
diff --git a/requirements.txt b/requirements.txt
index 703e1107..bd3a4766 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,14 +4,14 @@
#
# pip-compile requirements.in
#
-ansible-compat==25.6.0
+ansible-compat==25.8.1
# via ansible-lint
-ansible-core==2.17.13
+ansible-core==2.17.14
# via
# -r requirements.in
# ansible-compat
# ansible-lint
-ansible-lint==25.6.1
+ansible-lint==25.8.2
# via -r requirements.in
ansible-runner==2.4.1
# via -r requirements.in
@@ -30,7 +30,7 @@ azure-core==1.35.0
# azure-mgmt-core
# azure-storage-blob
# azure-storage-queue
-azure-identity==1.23.1
+azure-identity==1.24.0
# via
# -r requirements.in
# azure-kusto-data
@@ -58,21 +58,21 @@ black==25.1.0
# ansible-lint
bracex==2.6
# via wcmatch
-certifi==2025.7.14
+certifi==2025.8.3
# via requests
-cffi==1.17.1
+cffi==2.0.0
# via cryptography
-charset-normalizer==3.4.2
+charset-normalizer==3.4.3
# via requests
click==8.2.1
# via
# -r requirements.in
# black
-coverage[toml]==7.10.0
+coverage[toml]==7.10.6
# via
# -r requirements.in
# pytest-cov
-cryptography==45.0.5
+cryptography==45.0.7
# via
# ansible-core
# azure-identity
@@ -84,7 +84,7 @@ dill==0.4.0
# via pylint
exceptiongroup==1.3.0
# via pytest
-filelock==3.18.0
+filelock==3.19.1
# via ansible-lint
idna==3.10
# via requests
@@ -107,15 +107,15 @@ jinja2==3.1.6
# ansible-core
jmespath==1.0.1
# via -r requirements.in
-jsonschema==4.25.0
+jsonschema==4.25.1
# via
# ansible-compat
# ansible-lint
-jsonschema-specifications==2025.4.1
+jsonschema-specifications==2025.9.1
# via jsonschema
lockfile==0.12.2
# via python-daemon
-markdown-it-py==3.0.0
+markdown-it-py==4.0.0
# via rich
markupsafe==3.0.2
# via jinja2
@@ -153,7 +153,7 @@ pathspec==0.12.1
# yamllint
pexpect==4.9.0
# via ansible-runner
-platformdirs==4.3.8
+platformdirs==4.4.0
# via
# black
# pylint
@@ -163,7 +163,7 @@ pluggy==1.6.0
# pytest-cov
ptyprocess==0.7.0
# via pexpect
-pycparser==2.22
+pycparser==2.23
# via cffi
pygments==2.19.2
# via
@@ -173,16 +173,16 @@ pyjwt[crypto]==2.10.1
# via
# msal
# pyjwt
-pylint==3.3.7
+pylint==3.3.8
# via -r requirements.in
-pytest==8.4.1
+pytest==8.4.2
# via
# -r requirements.in
# pytest-cov
# pytest-mock
-pytest-cov==6.2.1
+pytest-cov==7.0.0
# via -r requirements.in
-pytest-mock==3.14.1
+pytest-mock==3.15.0
# via -r requirements.in
python-daemon==3.1.2
# via ansible-runner
@@ -213,13 +213,13 @@ requests==2.32.5
# msal
resolvelib==1.0.1
# via ansible-core
-rich==14.0.0
+rich==14.1.0
# via -r requirements.in
-rpds-py==0.26.0
+rpds-py==0.27.1
# via
# jsonschema
# referencing
-ruamel-yaml==0.18.14
+ruamel-yaml==0.18.15
# via ansible-lint
ruamel-yaml-clib==0.2.12
# via ruamel-yaml
@@ -241,7 +241,7 @@ tomli==2.2.1
# pytest
tomlkit==0.13.3
# via pylint
-typing-extensions==4.14.1
+typing-extensions==4.15.0
# via
# astroid
# azure-core
@@ -252,7 +252,6 @@ typing-extensions==4.14.1
# black
# exceptiongroup
# referencing
- # rich
tzdata==2025.2
# via pandas
urllib3==2.5.0
diff --git a/scripts/setup.sh b/scripts/setup.sh
index 47b6a249..4abc0d49 100755
--- a/scripts/setup.sh
+++ b/scripts/setup.sh
@@ -13,9 +13,21 @@ set_output_context
# Ensure we're in the project root directory
cd "$(dirname "$script_dir")"
-packages=("python3-pip" "ansible" "sshpass" "python3-venv")
+packages=("python3-pip" "sshpass" "python3-venv")
install_packages "${packages[@]}"
+# Install az cli if not present
+if ! command_exists az; then
+ log "INFO" "Azure CLI not found. Installing Azure CLI..."
+ curl -L https://aka.ms/InstallAzureCli | bash
+ if command_exists az; then
+ log "INFO" "Azure CLI installed successfully."
+ else
+ log "ERROR" "Failed to install Azure CLI. Please install it manually."
+ exit 1
+ fi
+fi
+
# Verify Python3 is available
if ! command_exists python3; then
log "ERROR" "Python3 is not available after installation. Please install Python3 manually."
@@ -47,7 +59,7 @@ log "INFO" "Installing Python packages..."
if ! pip install --upgrade pip; then
log "ERROR" "Failed to upgrade pip."
fi
-if pip install pyyaml requests azure-identity azure-kusto-data azure-kusto-ingest azure-mgmt-network azure-storage-blob azure-storage-queue; then
+if pip install -r requirements.in; then
log "INFO" "Python packages installed successfully."
else
log "ERROR" "Failed to install Python packages."
diff --git a/src/ansible.cfg b/src/ansible.cfg
index 28b92a00..5c7636f6 100644
--- a/src/ansible.cfg
+++ b/src/ansible.cfg
@@ -9,7 +9,7 @@ display_skipped_hosts = False
conditional_bare_variables = False
interpreter_python = auto_silent
callbacks_enabled = profile_tasks
-stdout_callback = yaml
+stdout_callback = default
bin_ansible_callbacks = True
host_key_checking = False
error_on_undefined_vars = True
diff --git a/src/module_utils/get_pcmk_properties.py b/src/module_utils/get_pcmk_properties.py
index dc796165..322bc12d 100644
--- a/src/module_utils/get_pcmk_properties.py
+++ b/src/module_utils/get_pcmk_properties.py
@@ -89,14 +89,35 @@ def _get_expected_value(self, category, name):
:param name: The name of the configuration parameter.
:type name: str
:return: The expected value for the configuration parameter.
- :rtype: str
+ :rtype: tuple(str, bool)
"""
_, defaults_key = self.BASIC_CATEGORIES[category]
fence_config = self.constants["VALID_CONFIGS"].get(self.fencing_mechanism, {})
os_config = self.constants["VALID_CONFIGS"].get(self.os_type, {})
- return fence_config.get(name) or os_config.get(name, self.constants[defaults_key].get(name))
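+    # Precedence: fencing-mechanism-specific values first, then OS-specific values, then the category defaults.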
+ fence_param = fence_config.get(name, {})
+ if fence_param:
+ if isinstance(fence_param, dict) and fence_param.get("value"):
+ return (fence_param.get("value", ""), fence_param.get("required", False))
+ elif isinstance(fence_param, (str, list)):
+ return (fence_param, False)
+
+ os_param = os_config.get(name, {})
+ if os_param:
+ if isinstance(os_param, dict) and os_param.get("value"):
+ return (os_param.get("value", ""), os_param.get("required", False))
+ elif isinstance(os_param, (str, list)):
+ return (os_param, False)
+
+ default_param = self.constants[defaults_key].get(name, {})
+ if default_param:
+ if isinstance(default_param, dict) and default_param.get("value"):
+ return (default_param.get("value", ""), default_param.get("required", False))
+ elif isinstance(default_param, (str, list)):
+ return (default_param, False)
+
+ return None
def _get_resource_expected_value(self, resource_type, section, param_name, op_name=None):
"""
@@ -111,20 +132,21 @@ def _get_resource_expected_value(self, resource_type, section, param_name, op_na
:param op_name: The name of the operation (if applicable), defaults to None
:type op_name: str, optional
:return: The expected value for the resource configuration parameter.
- :rtype: str
+ :rtype: tuple(str, bool)
"""
resource_defaults = (
self.constants["RESOURCE_DEFAULTS"].get(self.os_type, {}).get(resource_type, {})
)
-
+ attr = None
if section == "meta_attributes":
- return resource_defaults.get("meta_attributes", {}).get(param_name)
+ attr = resource_defaults.get("meta_attributes", {}).get(param_name)
elif section == "operations":
ops = resource_defaults.get("operations", {}).get(op_name, {})
- return ops.get(param_name)
+ attr = ops.get(param_name)
elif section == "instance_attributes":
- return resource_defaults.get("instance_attributes", {}).get(param_name)
- return None
+ attr = resource_defaults.get("instance_attributes", {}).get(param_name)
+
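+    # Each resource default entry is a {value, required} mapping; surface it as a (value, required) tuple.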
+ return (attr.get("value"), attr.get("required", False)) if attr else None
def _create_parameter(
self,
@@ -157,22 +179,36 @@ def _create_parameter(
:rtype: dict
"""
if expected_value is None:
- expected_value = self._get_expected_value_for_category(
+ expected_config = self._get_expected_value_for_category(
category, subcategory, name, op_name
)
+ else:
+ if isinstance(expected_value, tuple) and len(expected_value) == 2:
+ expected_config = expected_value # Already in correct format
+ else:
+ expected_config = (expected_value, False)
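+        # expected_config is a (value, required) tuple (or None when no expectation is defined).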
- status = self._determine_parameter_status(value, expected_value)
+ status = self._determine_parameter_status(value, expected_config)
- if isinstance(expected_value, list):
- expected_value = expected_value[0] if expected_value else ""
- elif isinstance(expected_value, dict):
- expected_value = (
+ display_expected_value = None
+ if expected_config is None:
+ display_expected_value = ""
+ else:
+ if isinstance(expected_config, tuple):
+ display_expected_value = expected_config[0]
+ else:
+ display_expected_value = expected_config
+
+ if isinstance(display_expected_value, list):
+ display_expected_value = display_expected_value[0] if display_expected_value else ""
+ elif isinstance(display_expected_value, dict):
+ display_expected_value = (
[
item
- for val in expected_value.values()
+ for val in display_expected_value.values()
for item in (val if isinstance(val, list) else [val])
]
- if expected_value
+ if display_expected_value
else ""
)
@@ -181,7 +217,7 @@ def _create_parameter(
id=id if id else "",
name=name if not op_name else f"{op_name}_{name}",
value=value,
- expected_value=expected_value if expected_value is not None else "",
+ expected_value=display_expected_value if display_expected_value is not None else "",
status=status if status else TestStatus.ERROR.value,
).to_dict()
@@ -211,34 +247,47 @@ def _get_expected_value_for_category(self, category, subcategory, name, op_name)
else:
return self._get_expected_value(category, name)
- def _determine_parameter_status(self, value, expected_value):
+ def _determine_parameter_status(self, value, expected_config):
"""
Determine the status of a parameter based on its value and expected value.
:param value: The actual value of the parameter.
:type value: str
- :param expected_value: The expected value of the parameter.
- :type expected_value: str or list or dict
+        :param expected_config: The expected value of the parameter and a bool indicating whether it is required.
+ :type expected_config: tuple(str, bool)
:return: The status of the parameter.
:rtype: str
"""
- if expected_value is None or value == "":
+ if expected_config is None:
return TestStatus.INFO.value
- elif isinstance(expected_value, (str, list)):
- if isinstance(expected_value, list):
- return (
- TestStatus.SUCCESS.value
- if str(value) in expected_value
- else TestStatus.ERROR.value
- )
+
+ if isinstance(expected_config, tuple):
+ expected_value, is_required = expected_config
+ elif isinstance(expected_config, dict):
+ expected_value = expected_config.get("value")
+ is_required = expected_config.get("required", False)
+ else:
+ expected_value = expected_config
+ is_required = False
+
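+    # A missing actual value is a warning when the parameter is required, otherwise informational.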
+ if not value or value == "":
+ if is_required:
+ return TestStatus.WARNING.value
else:
- return (
- TestStatus.SUCCESS.value
- if str(value) == str(expected_value)
- else TestStatus.ERROR.value
- )
+ return TestStatus.INFO.value
+
+ if expected_value is None or expected_value == "":
+ return TestStatus.INFO.value
+ elif isinstance(expected_value, list):
+ return (
+ TestStatus.SUCCESS.value if str(value) in expected_value else TestStatus.ERROR.value
+ )
else:
- return TestStatus.ERROR.value
+ return (
+ TestStatus.SUCCESS.value
+ if str(value) == str(expected_value)
+ else TestStatus.ERROR.value
+ )
def _parse_nvpair_elements(self, elements, category, subcategory=None, op_name=None):
"""
@@ -297,38 +346,12 @@ def _parse_os_parameters(self):
id=section,
name=param_name,
value=value,
- expected_value=expected_value,
+ expected_value=expected_value.get("value", "") if expected_value else None,
)
)
return parameters
- def _parse_basic_config(self, element, category, subcategory=None):
- """
- Parse basic configuration parameters
-
- :param element: The XML element to parse.
- :type element: xml.etree.ElementTree.Element
- :param category: The category of the configuration parameter.
- :type category: str
- :param subcategory: The subcategory of the configuration parameter, defaults to None
- :type subcategory: str, optional
- :return: A list of parameter dictionaries.
- :rtype: list
- """
- parameters = []
- for nvpair in element.findall(".//nvpair"):
- parameters.append(
- self._create_parameter(
- category=category,
- subcategory=subcategory,
- name=nvpair.get("name", ""),
- value=nvpair.get("value", ""),
- id=nvpair.get("id", ""),
- )
- )
- return parameters
-
def _parse_resource(self, element, category):
"""
Parse resource-specific configuration parameters
@@ -376,37 +399,6 @@ def _parse_resource(self, element, category):
)
return parameters
- def _parse_constraints(self, root):
- """
- Parse constraints configuration parameters
-
- :param root: The XML root element to parse.
- :type root: xml.etree.ElementTree.Element
- :return: A list of parameter dictionaries.
- :rtype: list
- """
- parameters = []
- for element in root:
- tag = element.tag
- if tag in self.constants["CONSTRAINTS"]:
- for attr, expected in self.constants["CONSTRAINTS"][tag].items():
- if element.get(attr) is not None:
- parameters.append(
- self._create_parameter(
- category="constraints",
- subcategory=tag,
- id=element.get("id", ""),
- name=attr,
- value=element.get(attr),
- expected_value=expected,
- )
- )
- else:
- continue
- else:
- continue
- return parameters
-
def _parse_resources_section(self, root):
"""
Parse resources section - can be overridden by subclasses for custom resource parsing.
@@ -476,57 +468,26 @@ def _get_scope_from_cib(self, scope):
return self.cib_output.find(xpath)
return None
- def parse_ha_cluster_config(self):
+ def validate_from_constants(self):
"""
- Parse HA cluster configuration XML and return a list of properties.
- This is the main orchestration method that coordinates all parsing activities.
+        Constants-first validation: iterate through the expected constants and validate them against the CIB.
+        This ensures every expected parameter is checked and supports offline validation from a provided CIB.
"""
parameters = []
- scopes = [
- "rsc_defaults",
- "crm_config",
- "op_defaults",
- "constraints",
- "resources",
- ]
-
- for scope in scopes:
- if self._should_skip_scope(scope):
- continue
-
- self.category = scope
- if self.cib_output:
- root = self._get_scope_from_cib(scope)
- else:
- root = self.parse_xml_output(
- self.execute_command_subprocess(CIB_ADMIN(scope=scope))
- )
- if not root:
- continue
-
- try:
- if self.category in self.BASIC_CATEGORIES:
- xpath = self.BASIC_CATEGORIES[self.category][0]
- for element in root.findall(xpath):
- parameters.extend(self._parse_basic_config(element, self.category))
-
- elif self.category == "resources":
- parameters.extend(self._parse_resources_section(root))
-
- elif self.category == "constraints":
- parameters.extend(self._parse_constraints(root))
-
- except Exception as ex:
- self.result["message"] += f"Failed to get {self.category} configuration: {str(ex)}"
- continue
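+    # Walk each constants category and compare its expected values against the CIB (live or provided offline).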
+ for category in ["crm_config", "rsc_defaults", "op_defaults"]:
+ if not self._should_skip_scope(category):
+ parameters.extend(self._validate_basic_constants(category))
+ parameters.extend(self._validate_resource_constants())
+ parameters.extend(self._validate_constraint_constants())
try:
if not self.cib_output:
parameters.extend(self._parse_os_parameters())
else:
self.result["message"] += "CIB output provided, skipping OS parameters parsing. "
except Exception as ex:
- self.result["message"] += f"Failed to get OS parameters: {str(ex)} \n"
+ self.result["message"] += f"Failed to get OS parameters: {str(ex)} "
+
try:
if not self.cib_output:
parameters.extend(self._get_additional_parameters())
@@ -535,18 +496,174 @@ def parse_ha_cluster_config(self):
"message"
] += "CIB output provided, skipping additional parameters parsing. "
except Exception as ex:
- self.result["message"] += f"Failed to get additional parameters: {str(ex)} \n"
+ self.result["message"] += f"Failed to get additional parameters: {str(ex)} "
+
failed_parameters = [
param
for param in parameters
if param.get("status", TestStatus.ERROR.value) == TestStatus.ERROR.value
]
+ warning_parameters = [
+ param for param in parameters if param.get("status", "") == TestStatus.WARNING.value
+ ]
+
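+    # Roll up the overall status: any ERROR wins, then WARNING, otherwise SUCCESS.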
+ if failed_parameters:
+ overall_status = TestStatus.ERROR.value
+ elif warning_parameters:
+ overall_status = TestStatus.WARNING.value
+ else:
+ overall_status = TestStatus.SUCCESS.value
+
self.result.update(
{
"details": {"parameters": parameters},
- "status": (
- TestStatus.ERROR.value if failed_parameters else TestStatus.SUCCESS.value
- ),
+ "status": overall_status,
}
)
self.result["message"] += "HA Parameter Validation completed successfully. "
+
+ def _validate_basic_constants(self, category):
+ """
+ Validate basic configuration constants with offline validation support.
+    Uses the existing CIB parsing logic but follows a constants-first approach.
+    Creates dynamic subcategories based on element IDs found in the CIB.
+
+ :param category: The category to validate (crm_config, rsc_defaults, op_defaults)
+ :type category: str
+ :return: A list of parameter dictionaries
+ :rtype: list
+ """
+ parameters = []
+
+ if category not in self.BASIC_CATEGORIES:
+ return parameters
+
+ _, constants_key = self.BASIC_CATEGORIES[category]
+ category_constants = self.constants.get(constants_key, {})
+
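+    # Constants-first: every parameter expected by the constants is checked, even if absent from the CIB.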
+ for param_name, expected_config in category_constants.items():
+ param_value, param_id = self._find_param_with_element_info(category, param_name)
+ expected_result = self._get_expected_value(category, param_name)
+ if expected_result:
+ expected_value, is_required = expected_result
+ expected_config_tuple = (expected_value, is_required)
+ else:
+ if isinstance(expected_config, dict):
+ expected_value = expected_config.get("value", "")
+ is_required = expected_config.get("required", False)
+ expected_config_tuple = (expected_value, is_required)
+ else:
+ expected_value = str(expected_config)
+ expected_config_tuple = (expected_value, False)
+
+ parameters.append(
+ self._create_parameter(
+ category=category,
+ name=param_name,
+ value=param_value,
+ expected_value=expected_config_tuple,
+ subcategory=param_id if param_id else "",
+ id=param_id,
+ )
+ )
+
+ return parameters
+
+ def _find_param_with_element_info(self, category, param_name):
+ """
+ Find a parameter value and its own unique ID in CIB XML.
+ Returns both the parameter value and the parameter's own ID (not container ID).
+
+ :param category: The category scope to search in (crm_config, rsc_defaults, op_defaults)
+ :type category: str
+ :param param_name: The parameter name to find
+ :type param_name: str
+ :return: Tuple of (parameter_value, parameter_id) or ("", "") if not found
+ :rtype: tuple(str, str)
+ """
+ param_value, param_id = "", ""
+ try:
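+        # Offline mode: read from the provided CIB XML; otherwise query the live cluster via cibadmin.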
+ if self.cib_output:
+ root = self._get_scope_from_cib(category)
+ else:
+ root = self.parse_xml_output(
+ self.execute_command_subprocess(CIB_ADMIN(scope=category))
+ )
+
+ if not root:
+ return param_value, param_id
+
+ if category in self.BASIC_CATEGORIES:
+ for element in root.findall(self.BASIC_CATEGORIES[category][0]):
+ for nvpair in element.findall(".//nvpair"):
+ if nvpair.get("name") == param_name:
+ param_id = nvpair.get("id", "")
+ param_value = nvpair.get("value", "")
+ return param_value, param_id
+
+ except Exception as ex:
+ self.result[
+ "message"
+ ] += f"Error finding parameter {param_name} in {category}: {str(ex)} "
+
+ return param_value, param_id
+
+ def _validate_resource_constants(self):
+ """
+ Resource validation - to be overridden by subclasses.
+ Base implementation returns empty list.
+
+ :return: A list of parameter dictionaries
+ :rtype: list
+ """
+ return []
+
+ def _validate_constraint_constants(self):
+ """
+ Validate constraint constants with offline validation support.
+    Uses a constants-first approach to validate constraints against the CIB.
+
+ :return: A list of parameter dictionaries
+ :rtype: list
+ """
+ parameters = []
+
+ if "CONSTRAINTS" not in self.constants:
+ return parameters
+
+ try:
+ if self.cib_output:
+ constraints_scope = self._get_scope_from_cib("constraints")
+ else:
+ constraints_scope = self.parse_xml_output(
+ self.execute_command_subprocess(CIB_ADMIN(scope="constraints"))
+ )
+
+ if constraints_scope is not None:
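+                # Compare every constraint attribute defined in constants against each matching CIB element.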
+ for constraint_type, constraint_config in self.constants["CONSTRAINTS"].items():
+ elements = constraints_scope.findall(f".//{constraint_type}")
+
+ for element in elements:
+ for attr_name, expected_config in constraint_config.items():
+ actual_value = element.get(attr_name, "")
+ expected_value = (
+ expected_config.get("value")
+ if isinstance(expected_config, dict)
+ else expected_config
+ )
+
+ parameters.append(
+ self._create_parameter(
+ category="constraints",
+ subcategory=constraint_type,
+ id=element.get("id", ""),
+ name=attr_name,
+ value=actual_value,
+ expected_value=expected_value,
+ )
+ )
+
+ except Exception as ex:
+ self.result["message"] += f"Error validating constraint constants: {str(ex)} "
+
+ return parameters
diff --git a/src/module_utils/sap_automation_qa.py b/src/module_utils/sap_automation_qa.py
index 4dc6dae2..bd94b369 100644
--- a/src/module_utils/sap_automation_qa.py
+++ b/src/module_utils/sap_automation_qa.py
@@ -7,7 +7,7 @@
import sys
import logging
import subprocess
-from typing import Optional, Dict, Any
+from typing import Dict, Any
import xml.etree.ElementTree as ET
try:
diff --git a/src/modules/get_pcmk_properties_db.py b/src/modules/get_pcmk_properties_db.py
index 2eeedb15..843da940 100644
--- a/src/modules/get_pcmk_properties_db.py
+++ b/src/modules/get_pcmk_properties_db.py
@@ -18,9 +18,11 @@
try:
from ansible.module_utils.get_pcmk_properties import BaseHAClusterValidator
from ansible.module_utils.enums import OperatingSystemFamily, HanaSRProvider
+ from ansible.module_utils.commands import CIB_ADMIN
except ImportError:
from src.module_utils.get_pcmk_properties import BaseHAClusterValidator
from src.module_utils.enums import OperatingSystemFamily, HanaSRProvider
+ from src.module_utils.commands import CIB_ADMIN
DOCUMENTATION = r"""
---
@@ -194,7 +196,7 @@ def __init__(
)
self.instance_number = instance_number
self.saphanasr_provider = saphanasr_provider
- self.parse_ha_cluster_config()
+ self.validate_from_constants()
def _parse_resources_section(self, root):
"""
@@ -211,6 +213,8 @@ def _parse_resources_section(self, root):
resource_categories.pop("topology", None)
else:
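+            # Drop the remaining SAPHanaSR-angi resource categories so they are not validated.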
resource_categories.pop("angi_topology", None)
+ resource_categories.pop("angi_filesystem", None)
+ resource_categories.pop("angi_hana", None)
for sub_category, xpath in resource_categories.items():
elements = root.findall(xpath)
@@ -219,6 +223,31 @@ def _parse_resources_section(self, root):
return parameters
+ def _validate_resource_constants(self):
+ """
+ Resource validation with HANA-specific logic and offline validation support.
+ Validates resource constants by iterating through expected parameters.
+
+ :return: A list of parameter dictionaries
+ :rtype: list
+ """
+ parameters = []
+
+ try:
+ if self.cib_output:
+ resource_scope = self._get_scope_from_cib("resources")
+ else:
+ resource_scope = self.parse_xml_output(
+ self.execute_command_subprocess(CIB_ADMIN(scope="resources"))
+ )
+ if resource_scope is not None:
+ parameters.extend(self._parse_resources_section(resource_scope))
+
+ except Exception as ex:
+ self.result["message"] += f"Error validating resource constants: {str(ex)} "
+
+ return parameters
+
def _parse_global_ini_parameters(self):
"""
Parse global.ini parameters specific to SAP HANA.
@@ -255,22 +284,29 @@ def _parse_global_ini_parameters(self):
if sep
}
- for param_name, expected_value in global_ini_defaults.items():
+ for param_name, expected_config in global_ini_defaults.items():
value = global_ini_properties.get(param_name, "")
- if isinstance(expected_value, list):
- if value in expected_value:
- expected_value = value
+ if isinstance(expected_config, dict):
+ expected_value = expected_config.get("value")
+ is_required = expected_config.get("required", False)
+ else:
+ expected_value = expected_config
+ is_required = False
self.log(
logging.INFO,
- f"param_name: {param_name}, value: {value}, expected_value: {expected_value}",
+ f"param_name: {param_name}, value: {value}, expected_value: {expected_config}",
)
parameters.append(
self._create_parameter(
category="global_ini",
name=param_name,
value=value,
- expected_value=expected_value,
+ expected_value=(
+ expected_config.get("value")
+ if isinstance(expected_config, dict)
+ else expected_value
+ ),
)
)
except Exception as ex:
diff --git a/src/modules/get_pcmk_properties_scs.py b/src/modules/get_pcmk_properties_scs.py
index 8b3d95d5..59680f1c 100644
--- a/src/modules/get_pcmk_properties_scs.py
+++ b/src/modules/get_pcmk_properties_scs.py
@@ -17,9 +17,11 @@
try:
from ansible.module_utils.get_pcmk_properties import BaseHAClusterValidator
from ansible.module_utils.enums import OperatingSystemFamily, TestStatus
+ from ansible.module_utils.commands import CIB_ADMIN
except ImportError:
from src.module_utils.get_pcmk_properties import BaseHAClusterValidator
from src.module_utils.enums import OperatingSystemFamily, TestStatus
+ from src.module_utils.commands import CIB_ADMIN
DOCUMENTATION = r"""
@@ -191,7 +193,7 @@ def __init__(
self.scs_instance_number = scs_instance_number
self.ers_instance_number = ers_instance_number
self.nfs_provider = nfs_provider
- self.parse_ha_cluster_config()
+ self.validate_from_constants()
def _get_expected_value_for_category(self, category, subcategory, name, op_name):
"""
@@ -218,17 +220,49 @@ def _get_expected_value_for_category(self, category, subcategory, name, op_name)
else:
return self._get_expected_value(category, name)
+ def _validate_resource_constants(self):
+ """
+ Resource validation with SCS-specific logic and offline validation support.
+ Validates resource constants by iterating through expected parameters.
+
+ :return: A list of parameter dictionaries
+ :rtype: list
+ """
+ parameters = []
+
+ try:
+ if self.cib_output:
+ resource_scope = self._get_scope_from_cib("resources")
+ else:
+ resource_scope = self.parse_xml_output(
+ self.execute_command_subprocess(CIB_ADMIN(scope="resources"))
+ )
+
+ if resource_scope is not None:
+ parameters.extend(self._parse_resources_section(resource_scope))
+
+ except Exception as ex:
+ self.result["message"] += f"Error validating resource constants: {str(ex)} "
+
+ return parameters
+
def _determine_parameter_status(self, value, expected_value):
"""
Determine the status of a parameter with SCS-specific logic for NFS provider.
:param value: The actual value of the parameter.
:type value: str
- :param expected_value: The expected value of the parameter.
- :type expected_value: str or list or dict
+        :param expected_value: The expected value as a (value, required) tuple, or a legacy str, list, or dict value.
+ :type expected_value: tuple or str or list or dict
:return: The status of the parameter.
:rtype: str
"""
+ if isinstance(expected_value, tuple):
+ expected_val, required = expected_value
+ if not required and (expected_val is None or value == ""):
+ return TestStatus.INFO.value
+ expected_value = expected_val
+
if expected_value is None or value == "":
return TestStatus.INFO.value
elif isinstance(expected_value, (str, list)):
@@ -245,12 +279,38 @@ def _determine_parameter_status(self, value, expected_value):
else TestStatus.ERROR.value
)
elif isinstance(expected_value, dict):
- provider_values = expected_value.get(self.nfs_provider, expected_value.get("AFS", []))
- return (
- TestStatus.SUCCESS.value
- if str(value) in provider_values
- else TestStatus.ERROR.value
- )
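+            # Expected values are keyed by NFS provider (e.g. AFS, ANF); prefer the configured provider's entry.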
+ provider_values = []
+ if self.nfs_provider and self.nfs_provider in expected_value:
+ provider_config = expected_value[self.nfs_provider]
+ if isinstance(provider_config, dict) and "value" in provider_config:
+ provider_values = provider_config["value"]
+ else:
+ provider_values = provider_config
+ else:
+ # If provider is unknown/not set, collect all provider values
+ for provider_key, provider_config in expected_value.items():
+ if isinstance(provider_config, dict) and "value" in provider_config:
+ if isinstance(provider_config["value"], list):
+ provider_values.extend(provider_config["value"])
+ else:
+ provider_values.append(provider_config["value"])
+ elif isinstance(provider_config, list):
+ provider_values.extend(provider_config)
+ else:
+ provider_values.append(provider_config)
+
+ if isinstance(provider_values, list):
+ return (
+ TestStatus.SUCCESS.value
+ if str(value) in provider_values
+ else TestStatus.ERROR.value
+ )
+ else:
+ return (
+ TestStatus.SUCCESS.value
+ if str(value) == str(provider_values)
+ else TestStatus.ERROR.value
+ )
else:
return TestStatus.ERROR.value
diff --git a/src/roles/ha_db_hana/tasks/files/constants.yaml b/src/roles/ha_db_hana/tasks/files/constants.yaml
index fa103cdf..f2865b5d 100644
--- a/src/roles/ha_db_hana/tasks/files/constants.yaml
+++ b/src/roles/ha_db_hana/tasks/files/constants.yaml
@@ -7,330 +7,706 @@
# === CRM Configuration Defaults ===
# cibadmin --query --scope crm_config
CRM_CONFIG_DEFAULTS:
- cluster-infrastructure: corosync
- priority-fencing-delay: ['30', '30s']
- stonith-action: reboot
- stonith-enabled: 'true'
- concurrent-fencing: 'true'
- maintenance-mode: 'false'
- node-health-strategy: 'custom'
- azure-events-az_globalPullState: 'IDLE'
+ cluster-infrastructure:
+ value: corosync
+ required: false
+ priority-fencing-delay:
+ value: ["30", "30s"]
+ required: true
+ stonith-action:
+ value: reboot
+ required: false
+ stonith-enabled:
+ value: "true"
+ required: false
+ concurrent-fencing:
+ value: "true"
+ required: false
+ maintenance-mode:
+ value: "false"
+ required: false
+ node-health-strategy:
+ value: "custom"
+ required: false
+ azure-events-az_globalPullState:
+ value: "IDLE"
+ required: false
# === Operation Defaults ===
# cibadmin --query --scope op_defaults
OP_DEFAULTS:
- record-pending: 'true'
- timeout: ['600', '600s']
+ record-pending:
+ value: "true"
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
# === Resource Defaults ===
# cibadmin --query --scope rsc_defaults
RSC_DEFAULTS:
- migration-threshold: '5000'
- priority: '1'
- resource-stickiness: '1000'
+ migration-threshold:
+ value: "5000"
+ required: false
+ priority:
+ value: "1"
+ required: false
+ resource-stickiness:
+ value: "1000"
+ required: false
# === Constraints ===
# cibadmin --query --scope constraints
CONSTRAINTS:
rsc_colocation:
- score: "4000"
- rsc-role: "Started"
+ score:
+ value: "4000"
+ required: false
+ rsc-role:
+ value: "Started"
+ required: false
rsc_order:
- kind: "Optional"
+ kind:
+ value: "Optional"
+ required: false
# === Valid Configurations for different OS versions ===
# Specify the properties that are different for different OS versions
VALID_CONFIGS:
REDHAT:
- priority-fencing-delay: ['15', '15s']
+ priority-fencing-delay:
+ value: ["15", "15s"]
+ required: true
SUSE: {}
AFA:
- have-watchdog: "false"
- stonith-timeout: ["900s", "900"]
+ have-watchdog:
+ value: "false"
+ required: true
+ stonith-timeout:
+ value: ["900s", "900"]
+ required: true
ISCSI:
- have-watchdog: "true"
- stonith-timeout: ["210", "210s"]
-
+ have-watchdog:
+ value: "true"
+ required: true
+ stonith-timeout:
+ value: ["210", "210s"]
+ required: true
+ ASD:
+ have-watchdog:
+ value: "true"
+ required: true
+ stonith-timeout:
+ value: ["210", "210s"]
+ required: true
# === Resource Defaults ===
# cibadmin --query --scope resources
RESOURCE_DEFAULTS:
SUSE:
fence_agent:
+ required: false
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
- pcmk_monitor_timeout: ["120", "120s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
+ pcmk_monitor_timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: ["3600", "3600s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["3600", "3600s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
sbd_stonith:
+ required: false
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
- pcmk_monitor_timeout: ["120", "120s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
+ pcmk_monitor_timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: ["600", "600s"]
- timeout: ["15", "15s"]
+ interval:
+ value: ["600", "600s"]
+ required: false
+ timeout:
+ value: ["15", "15s"]
+ required: false
topology:
+ required: false
meta_attributes:
- clone-node-max: "1"
- target-role: "Started"
- interleave: "true"
+ clone-node-max:
+ value: "1"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["300", "300s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["300", "300s"]
+ required: false
angi_topology:
+ required: false
meta_attributes:
- clone-node-max: "1"
- target-role: "Started"
- interleave: "true"
+ clone-node-max:
+ value: "1"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
operations:
monitor:
- interval: ["50", "50s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["50", "50s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["300", "300s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["300", "300s"]
+ required: false
hana:
+ required: false
meta_attributes:
- notify: "true"
- clone-max: "2"
- clone-node-max: "1"
- target-role: "Started"
- interleave: "true"
- priority: "100"
+ notify:
+ value: "true"
+ required: false
+ clone-max:
+ value: "2"
+ required: false
+ clone-node-max:
+ value: "1"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
+ priority:
+ value: "100"
+ required: false
instance_attributes:
- PREFER_SITE_TAKEOVER: "true"
- DUPLICATE_PRIMARY_TIMEOUT: "7200"
- AUTOMATED_REGISTER: "true"
+ PREFER_SITE_TAKEOVER:
+ value: "true"
+ required: false
+ DUPLICATE_PRIMARY_TIMEOUT:
+ value: "7200"
+ required: false
+ AUTOMATED_REGISTER:
+ value: "true"
+ required: false
operations:
start:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
monitor:
- timeout: ["700", "700s"]
+ timeout:
+ value: ["700", "700s"]
+ required: false
angi_hana:
+ required: false
meta_attributes:
- notify: "true"
- clone-max: "2"
- clone-node-max: "1"
- target-role: "Started"
- interleave: "true"
- priority: "100"
+ notify:
+ value: "true"
+ required: false
+ clone-max:
+ value: "2"
+ required: false
+ clone-node-max:
+ value: "1"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
+ priority:
+ value: "100"
+ required: false
instance_attributes:
- PREFER_SITE_TAKEOVER: "true"
- DUPLICATE_PRIMARY_TIMEOUT: "7200"
- AUTOMATED_REGISTER: "true"
+ PREFER_SITE_TAKEOVER:
+ value: "true"
+ required: false
+ DUPLICATE_PRIMARY_TIMEOUT:
+ value: "7200"
+ required: false
+ AUTOMATED_REGISTER:
+ value: "true"
+ required: false
operations:
start:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
demote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
monitor:
- timeout: ["700", "700s"]
+ timeout:
+ value: ["700", "700s"]
+ required: false
ipaddr:
+ required: false
meta_attributes:
- target-role: "Started"
+ target-role:
+ value: "Started"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
filesystem:
+ required: false
meta_attributes:
- clone-node-max: "1"
- interleave: "true"
+ clone-node-max:
+ value: "1"
+ required: false
+ interleave:
+ value: "true"
+ required: false
operations:
monitor:
- interval: ["120", "120s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["120", "120s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
angi_filesystem:
+ required: false
meta_attributes:
- clone-node-max: "1"
- interleave: "true"
+ clone-node-max:
+ value: "1"
+ required: true
+ interleave:
+ value: "true"
+ required: false
operations:
monitor:
- interval: ["120", "120s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["120", "120s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["10", "10s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["10", "10s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
azurelb:
+ required: false
meta_attributes:
- resource-stickiness: "0"
+ resource-stickiness:
+ value: "0"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
-
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
REDHAT:
fence_agent:
+ required: false
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: "900"
- power_timeout: "240"
- pcmk_monitor_timeout: "120"
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: "900"
+ required: false
+ power_timeout:
+ value: "240"
+ required: false
+ pcmk_monitor_timeout:
+ value: "120"
+ required: false
operations:
monitor:
- interval: ["3600", "3600s"]
+ interval:
+ value: ["3600", "3600s"]
+ required: false
sbd_stonith:
+ required: false
instance_attributes:
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
- pcmk_monitor_timeout: ["120", "120s"]
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
+ pcmk_monitor_timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: ["600", "600s"]
- timeout: ["15", "15s"]
+ interval:
+ value: ["600", "600s"]
+ required: false
+ timeout:
+ value: ["15", "15s"]
+ required: false
topology:
+ required: false
meta_attributes:
- clone-node-max: "1"
- clone-max: "2"
- target-role: "Started"
- interleave: "true"
- failure-timeout: "120s"
+ clone-node-max:
+ value: "1"
+ required: false
+ clone-max:
+ value: "2"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
+ failure-timeout:
+ value: "120s"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["300", "300s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["300", "300s"]
+ required: false
methods:
- timeout: ["5", "5s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["5", "5s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
reload:
- timeout: ["5", "5s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["5", "5s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
hana:
+ required: false
meta_attributes:
- notify: "true"
- clone-max: "2"
- clone-node-max: "1"
- target-role: "Started"
- interleave: "true"
- priority: "100"
+ notify:
+ value: "true"
+ required: false
+ clone-max:
+ value: "2"
+ required: false
+ clone-node-max:
+ value: "1"
+ required: false
+ target-role:
+ value: "Started"
+ required: false
+ interleave:
+ value: "true"
+ required: false
+ priority:
+ value: "100"
+ required: true
instance_attributes:
- PREFER_SITE_TAKEOVER: "true"
- DUPLICATE_PRIMARY_TIMEOUT: "7200"
- AUTOMATED_REGISTER: "true"
+ PREFER_SITE_TAKEOVER:
+ value: "true"
+ required: false
+ DUPLICATE_PRIMARY_TIMEOUT:
+ value: "7200"
+ required: false
+ AUTOMATED_REGISTER:
+ value: "true"
+ required: false
operations:
start:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["3600", "3600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["3600", "3600s"]
+ required: false
monitor:
- timeout: ["700", "700s"]
+ timeout:
+ value: ["700", "700s"]
+ required: false
ipaddr:
meta_attributes:
- target-role: "Started"
+ target-role:
+ value: "Started"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
filesystem:
+ required: false
meta_attributes:
- clone-node-max: "1"
- interleave: "true"
+ clone-node-max:
+ value: "1"
+ required: false
+ interleave:
+ value: "true"
+ required: false
operations:
monitor:
- interval: ["20", "20s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["20", "20s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["60", "60s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["60", "60s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["60", "60s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["60", "60s"]
+ required: false
azurelb:
+ required: false
meta_attributes:
- resource-stickiness: "0"
+ resource-stickiness:
+ value: "0"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
-
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
# === OS Parameters ===
# Run command as root. Format of command is: "parent_key child_key"
@@ -338,39 +714,74 @@ RESOURCE_DEFAULTS:
OS_PARAMETERS:
DEFAULTS:
sysctl:
- net.ipv4.tcp_timestamps: "net.ipv4.tcp_timestamps = 0"
- vm.swappiness: "vm.swappiness = 10"
+ net.ipv4.tcp_timestamps:
+ value: "net.ipv4.tcp_timestamps = 0"
+ required: true
+ vm.swappiness:
+ value: "vm.swappiness = 10"
+ required: true
corosync-cmapctl:
- runtime.config.totem.token: "runtime.config.totem.token (u32) = 30000"
- runtime.config.totem.consensus: "runtime.config.totem.consensus (u32) = 36000"
+ runtime.config.totem.token:
+ value: "runtime.config.totem.token (u32) = 30000"
+ required: true
+ runtime.config.totem.consensus:
+ value: "runtime.config.totem.consensus (u32) = 36000"
+ required: true
# === Global INI ===
# Reading the global.ini file to get the provider and path for the SAPHanaSR resource agent
GLOBAL_INI:
SUSE:
SAPHanaSR:
- provider: "SAPHanaSR"
- path: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"]
- execution_order: "1"
+ provider:
+ value: "SAPHanaSR"
+ required: true
+ path:
+ value: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"]
+ required: true
+ execution_order:
+ value: "1"
+ required: true
SAPHanaSR-angi:
- provider: "susHanaSR"
- path: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"]
- execution_order: "1"
+ provider:
+ value: "susHanaSR"
+ required: true
+ path:
+ value: ["/usr/share/SAPHanaSR", "/hana/shared/myHooks"]
+ required: true
+ execution_order:
+ value: "1"
+ required: true
REDHAT:
SAPHanaSR:
- provider: "SAPHanaSR"
- path: ["/usr/share/SAPHanaSR/srHook", "/hana/shared/myHooks"]
- execution_order: "1"
-
+ provider:
+ value: "SAPHanaSR"
+ required: true
+ path:
+ value: ["/usr/share/SAPHanaSR/srHook", "/hana/shared/myHooks"]
+ required: true
+ execution_order:
+ value: "1"
+ required: true
# === Azure Load Balancer ===
# Azure Load Balancer configuration
AZURE_LOADBALANCER:
PROBES:
- probe_threshold: 2
- interval_in_seconds: 5
-
+ probe_threshold:
+ value: 2
+ required: true
+ interval_in_seconds:
+ value: 5
+ required: true
RULES:
- idle_timeout_in_minutes: 30
- enable_floating_ip: true
- enable_tcp_reset: false
+ idle_timeout_in_minutes:
+ value: 30
+ required: true
+ enable_floating_ip:
+ value: true
+ required: true
+ enable_tcp_reset:
+ value: false
+ required: false
diff --git a/src/roles/ha_scs/tasks/files/constants.yaml b/src/roles/ha_scs/tasks/files/constants.yaml
index 8fec70fc..3348fccf 100644
--- a/src/roles/ha_scs/tasks/files/constants.yaml
+++ b/src/roles/ha_scs/tasks/files/constants.yaml
@@ -7,324 +7,675 @@
# === CRM Configuration Defaults ===
# cibadmin --query --scope crm_config
CRM_CONFIG_DEFAULTS:
- cluster-infrastructure: corosync
- priority-fencing-delay: ["30", "30s"]
- stonith-action: reboot
- stonith-enabled: "true"
- concurrent-fencing: "true"
- maintenance-mode: "false"
- node-health-strategy: "custom"
- azure-events-az_globalPullState: "IDLE"
+ cluster-infrastructure:
+ value: corosync
+ required: false
+ priority-fencing-delay:
+ value: ["30", "30s"]
+ required: true
+ stonith-action:
+ value: reboot
+ required: false
+ stonith-enabled:
+ value: "true"
+ required: false
+ concurrent-fencing:
+ value: "true"
+ required: false
+ maintenance-mode:
+ value: "false"
+ required: false
+ node-health-strategy:
+ value: "custom"
+ required: false
+ azure-events-az_globalPullState:
+ value: "IDLE"
+ required: false
# === Operation Defaults ===
# cibadmin --query --scope op_defaults
OP_DEFAULTS:
- record-pending: "true"
- timeout: ["600", "600s"]
+ record-pending:
+ value: "true"
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
# === Resource Defaults ===
# cibadmin --query --scope rsc_defaults
RSC_DEFAULTS:
- migration-threshold: "3"
- priority: "1"
- resource-stickiness: "1"
+ migration-threshold:
+ value: "3"
+ required: false
+ priority:
+ value: "1"
+ required: false
+ resource-stickiness:
+ value: "1"
+ required: false
# === Constraints ===
# cibadmin --query --scope constraints
CONSTRAINTS:
rsc_colocation:
- score: "-5000"
- rsc-role: "Started"
- with-rsc-role: "Started"
+ score:
+ value: "-5000"
+ required: false
+ rsc-role:
+ value: "Started"
+ required: false
+ with-rsc-role:
+ value: "Started"
+ required: false
rsc_order:
- first-action: "start"
- then-action: "stop"
- symmetrical: "false"
+ first-action:
+ value: "start"
+ required: false
+ then-action:
+ value: "stop"
+ required: false
+ symmetrical:
+ value: "false"
+ required: false
rsc_location:
- score-attribute: "#health-azure"
- operation: "defined"
- attribute: "#uname"
+ score-attribute:
+ value: "#health-azure"
+ required: false
+ operation:
+ value: "defined"
+ required: false
+ attribute:
+ value: "#uname"
+ required: false
# === Valid Configurations for different OS versions ===
# Specify the properties that are different for different OS versions
VALID_CONFIGS:
REDHAT:
- priority-fencing-delay: "15s"
+ priority-fencing-delay:
+ value: ["15", "15s"]
+ required: false
SUSE: {}
AFA:
- have-watchdog: "false"
- stonith-timeout: ["900", "900s"]
+ have-watchdog:
+ value: "false"
+ required: false
+ stonith-timeout:
+ value: ["900", "900s"]
+ required: false
ISCSI:
- have-watchdog: "true"
- stonith-timeout: ["210", "210s"]
+ have-watchdog:
+ value: "true"
+ required: false
+ stonith-timeout:
+ value: ["210", "210s"]
+ required: false
+ ASD:
+ have-watchdog:
+ value: "true"
+ required: false
+ stonith-timeout:
+ value: ["210", "210s"]
+ required: false
# === Resource Defaults ===
# cibadmin --query --scope resources
RESOURCE_DEFAULTS:
SUSE:
fence_agent:
+ required: false
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
operations:
monitor:
- interval: ["3600", "3600s"]
- timeout: ["120", "120s"]
+ interval:
+ value: ["3600", "3600s"]
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
sbd_stonith:
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
- pcmk_monitor_timeout: ["120", "120s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
+ pcmk_monitor_timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: ["600", "600s"]
- timeout: ["15", "15s"]
+ interval:
+ value: ["600", "600s"]
+ required: false
+ timeout:
+ value: ["15", "15s"]
+ required: false
ascs:
instance_attributes:
- AUTOMATIC_RECOVER: "false"
- MINIMAL_PROBE: "true"
+ AUTOMATIC_RECOVER:
+ value: "false"
+ required: false
+ MINIMAL_PROBE:
+ value: "true"
+ required: false
meta_attributes:
- resource-stickiness: "5000"
- priority: "100"
+ resource-stickiness:
+ value: "5000"
+ required: false
+ priority:
+ value: "100"
+ required: true
operations:
monitor:
- interval: ["11", "11s"]
+ interval:
+ value: ["11", "11s"]
+ required: false
timeout:
- ANF: ["105", "105s"]
- AFS: ["60", "60s"]
+ ANF:
+ value: ["105", "105s"]
+ required: false
+ AFS:
+ value: ["60", "60s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["180", "180s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["180", "180s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["240", "240s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["240", "240s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
demote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
ers:
instance_attributes:
- AUTOMATIC_RECOVER: "false"
- MINIMAL_PROBE: "true"
- IS_ERS: "true"
+ AUTOMATIC_RECOVER:
+ value: "false"
+ required: false
+ MINIMAL_PROBE:
+ value: "true"
+ required: false
+ IS_ERS:
+ value: "true"
+ required: false
meta_attributes:
- resource-stickiness: "5000"
- priority: "100"
+ resource-stickiness:
+ value: "5000"
+ required: false
+ priority:
+ value: "100"
+ required: false
operations:
monitor:
- interval: ["11", "11s"]
+ interval:
+ value: ["11", "11s"]
+ required: false
timeout:
- ANF: ["105", "105s"]
- AFS: ["60", "60s"]
+ ANF:
+ value: ["105", "105s"]
+ required: false
+ AFS:
+ value: ["60", "60s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["180", "180s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["180", "180s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["240", "240s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["240", "240s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
demote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
ipaddr:
meta_attributes:
- target-role: "Started"
+ target-role:
+ value: "Started"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
azurelb:
meta_attributes:
- resource-stickiness: "0"
+ resource-stickiness:
+ value: "0"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
azureevents:
meta_attributes:
- allow-unhealthy-nodes: "true"
- failure-timeout: "120s"
+ allow-unhealthy-nodes:
+ value: "true"
+ required: false
+ failure-timeout:
+ value: "120s"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
start:
- interval: ["0", "0s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
REDHAT:
fence_agent:
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
operations:
monitor:
- interval: "3600"
- timeout: ["120", "120s"]
+ interval:
+ value: "3600"
+ required: false
+ timeout:
+ value: ["120", "120s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
sbd_stonith:
instance_attributes:
- pcmk_delay_max: "15"
- pcmk_monitor_retries: "4"
- pcmk_action_limit: "3"
- pcmk_reboot_timeout: ["900", "900s"]
- power_timeout: ["240", "240s"]
- pcmk_monitor_timeout: ["120", "120s"]
+ pcmk_delay_max:
+ value: "15"
+ required: false
+ pcmk_monitor_retries:
+ value: "4"
+ required: false
+ pcmk_action_limit:
+ value: "3"
+ required: false
+ pcmk_reboot_timeout:
+ value: ["900", "900s"]
+ required: false
+ power_timeout:
+ value: ["240", "240s"]
+ required: false
+ pcmk_monitor_timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: "600"
- timeout: ["15", "15s"]
+ interval:
+ value: "600"
+ required: false
+ timeout:
+ value: ["15", "15s"]
+ required: false
ascs:
instance_attributes:
- AUTOMATIC_RECOVER: "false"
- MINIMAL_PROBE: "true"
- meta_attributes:
- resource-stickiness: "5000"
- priority: "10"
+ AUTOMATIC_RECOVER:
+ value: "false"
+ required: false
+ MINIMAL_PROBE:
+ value: "true"
+ required: false
operations:
- monitor:
- interval: ["20", "20s"]
- timeout:
- ANF: ["105", "105s"]
- AFS: ["60", "60s"]
start:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
- promote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
- demote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
+ monitor:
+ interval:
+ value: ["20", "20s"]
+ required: false
+ timeout:
+ ANF:
+ value: ["105", "105s"]
+ required: false
+ AFS:
+ value: ["60", "60s"]
+ required: false
methods:
- timeout: ["5", "5s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["5", "5s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
reload:
- timeout: ["320", "320s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["320", "320s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
+ promote:
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
+ demote:
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
ers:
instance_attributes:
- AUTOMATIC_RECOVER: "false"
- MINIMAL_PROBE: "true"
- IS_ERS: "true"
+ AUTOMATIC_RECOVER:
+ value: "false"
+ required: false
+ MINIMAL_PROBE:
+ value: "true"
+ required: false
+ IS_ERS:
+ value: "true"
+ required: false
meta_attributes:
- resource-stickiness: "3000"
- priority: "100"
+ resource-stickiness:
+ value: "3000"
+ required: false
+ priority:
+ value: "100"
+ required: false
operations:
monitor:
- interval: ["20", "20s"]
+ interval:
+ value: ["20", "20s"]
+ required: false
timeout:
- ANF: ["105", "105s"]
- AFS: ["60", "60s"]
+ ANF:
+ value: ["105", "105s"]
+ required: false
+ AFS:
+ value: ["60", "60s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["600", "600s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["600", "600s"]
+ required: false
promote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
demote:
- interval: ["0", "0s"]
- timeout: ["320", "320s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["320", "320s"]
+ required: false
methods:
- timeout: ["5", "5s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["5", "5s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
reload:
- timeout: ["320", "320s"]
- interval: ["0", "0s"]
+ timeout:
+ value: ["320", "320s"]
+ required: false
+ interval:
+ value: ["0", "0s"]
+ required: false
ipaddr:
meta_attributes:
- target-role: "Started"
+ target-role:
+ value: "Started"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
azurelb:
meta_attributes:
- resource-stickiness: "0"
+ resource-stickiness:
+ value: "0"
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["20", "20s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["20", "20s"]
+ required: false
azureevents:
meta_attributes:
- allow-unhealthy-nodes: "true"
- failure-timeout: ["120", "120s"]
+ allow-unhealthy-nodes:
+ value: "true"
+ required: false
+ failure-timeout:
+ value: ["120", "120s"]
+ required: false
operations:
monitor:
- interval: ["10", "10s"]
- timeout: ["240", "240s"]
+ interval:
+ value: ["10", "10s"]
+ required: false
+ timeout:
+ value: ["240", "240s"]
+ required: false
start:
- interval: ["0", "0s"]
- timeout: ["10", "10s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["10", "10s"]
+ required: false
stop:
- interval: ["0", "0s"]
- timeout: ["10", "10s"]
+ interval:
+ value: ["0", "0s"]
+ required: false
+ timeout:
+ value: ["10", "10s"]
+ required: false
# === OS Parameters ===
@@ -333,20 +684,38 @@ RESOURCE_DEFAULTS:
OS_PARAMETERS:
DEFAULTS:
sysctl:
- net.ipv4.tcp_timestamps: "net.ipv4.tcp_timestamps = 0"
- vm.swappiness: "vm.swappiness = 10"
+ net.ipv4.tcp_timestamps:
+ value: "net.ipv4.tcp_timestamps = 0"
+ required: false
+ vm.swappiness:
+ value: "vm.swappiness = 10"
+ required: false
corosync-cmapctl:
- runtime.config.totem.token: "runtime.config.totem.token (u32) = 30000"
- runtime.config.totem.consensus: "runtime.config.totem.consensus (u32) = 36000"
+ runtime.config.totem.token:
+ value: "runtime.config.totem.token (u32) = 30000"
+ required: false
+ runtime.config.totem.consensus:
+ value: "runtime.config.totem.consensus (u32) = 36000"
+ required: false
# === Azure Load Balancer ===
# Azure Load Balancer configuration
AZURE_LOADBALANCER:
PROBES:
- probe_threshold: 2
- interval_in_seconds: 5
+ probe_threshold:
+ value: 2
+ required: false
+ interval_in_seconds:
+ value: 5
+ required: false
RULES:
- idle_timeout_in_minutes: 30
- enable_floating_ip: true
- enable_tcp_reset: false
+ idle_timeout_in_minutes:
+ value: 30
+ required: false
+ enable_floating_ip:
+ value: true
+ required: false
+ enable_tcp_reset:
+ value: false
+ required: false
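The constants above replace bare expected values with small `{value, required}` objects. Below is a minimal sketch of how a validator could consume that shape, consistent with the updated tests later in this patch; the function names, status strings, and fallback behavior are illustrative, not the framework's actual implementation:

```python
from typing import Tuple, Union

# Stand-ins for the framework's TestStatus values; the real enum lives in the repo,
# so plain strings are used here for illustration only.
SUCCESS, ERROR, WARNING, INFO = "PASSED", "FAILED", "WARNING", "INFO"

Expected = Union[str, list]


def get_expected_value(constants: dict, section: str, name: str) -> Tuple[Expected, bool]:
    """Return (value, required) for a parameter defined as {"value": ..., "required": ...}."""
    entry = constants.get(section, {}).get(name, {})
    return entry.get("value"), bool(entry.get("required", False))


def determine_parameter_status(actual: str, expected: Tuple[Expected, bool]) -> str:
    """Map an actual CIB value against an expected (value, required) pair."""
    value, required = expected
    if not actual:
        # Missing value: a required parameter is flagged, an optional one is informational.
        return WARNING if required else INFO
    if isinstance(value, list):
        return SUCCESS if actual in value else ERROR
    return SUCCESS if actual == value else ERROR


if __name__ == "__main__":
    constants = {"crm_config": {"priority": {"value": "10", "required": False}}}
    assert get_expected_value(constants, "crm_config", "priority") == ("10", False)
    assert determine_parameter_status("true", ("true", False)) == SUCCESS
    assert determine_parameter_status("true", ("false", False)) == ERROR
    assert determine_parameter_status("", ("expected_value", True)) == WARNING
    assert determine_parameter_status("900s", (["900", "900s"], False)) == SUCCESS
```

The updated tests exercise exactly these cases: matching and non-matching scalars, list-valued expected values, and an empty actual value for a parameter marked `required: true`, which is reported as a warning rather than an error.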
diff --git a/src/templates/report.html b/src/templates/report.html
index b4a77595..c5410a96 100644
--- a/src/templates/report.html
+++ b/src/templates/report.html
@@ -111,7 +111,7 @@
.pie {
--passed: #2a882e; /* Adjusted green to be brighter */
--failed: #df3d3d; /* Kept red but slightly adjusted */
- --warning: #fcd116;
+ --warning: #d36325;
--size: 150px;
--thickness: 12px;
--progress: 0;
@@ -193,7 +193,7 @@
}
.test-case.warning .title {
- background-color: #fcd116;
+ background-color: #d36325;
color: white;
}
diff --git a/src/vars/input-api.yaml b/src/vars/input-api.yaml
index 41e0d6b0..58e7a73c 100644
--- a/src/vars/input-api.yaml
+++ b/src/vars/input-api.yaml
@@ -29,7 +29,7 @@ test_groups:
including Corosync settings, Pacemaker resources, SBD device configuration,
and HANA system replication setup. This test is run in an offline mode where the CIB files are
already available in the offline_validation directory.
- enabled: false
+ enabled: true
- name: Azure Load Balancer Validation
task_name: azure-lb
@@ -156,7 +156,7 @@ test_groups:
including Corosync settings, Pacemaker resources, SBD device configuration, and SCS system
replication setup. This test is run in an offline mode where the CIB files are
already available in the offline_validation directory.
- enabled: false
+ enabled: true
- name: Azure Load Balancer Validation
task_name: azure-lb
diff --git a/tests/module_utils/get_pcmk_properties_test.py b/tests/module_utils/get_pcmk_properties_test.py
index ea96dcbe..02b28315 100644
--- a/tests/module_utils/get_pcmk_properties_test.py
+++ b/tests/module_utils/get_pcmk_properties_test.py
@@ -88,53 +88,74 @@
DUMMY_CONSTANTS = {
"VALID_CONFIGS": {
- "REDHAT": {"stonith-enabled": "true", "cluster-name": "hdb_HDB"},
- "azure-fence-agent": {"priority": "10"},
- "sbd": {"pcmk_delay_max": "30"},
+ "REDHAT": {
+ "stonith-enabled": {"value": "true", "required": False},
+ "cluster-name": {"value": "hdb_HDB", "required": False},
+ },
+ "azure-fence-agent": {"priority": {"value": "10", "required": False}},
+ "sbd": {"pcmk_delay_max": {"value": "30", "required": False}},
},
"RSC_DEFAULTS": {
- "resource-stickiness": "1000",
- "migration-threshold": "5000",
+ "resource-stickiness": {"value": "1000", "required": False},
+ "migration-threshold": {"value": "5000", "required": False},
},
"OP_DEFAULTS": {
- "timeout": "600",
- "record-pending": "true",
+ "timeout": {"value": "600", "required": False},
+ "record-pending": {"value": "true", "required": False},
},
"CRM_CONFIG_DEFAULTS": {
- "stonith-enabled": "true",
- "maintenance-mode": "false",
+ "stonith-enabled": {"value": "true", "required": False},
+ "maintenance-mode": {"value": "false", "required": False},
},
"RESOURCE_DEFAULTS": {
"REDHAT": {
"fence_agent": {
- "meta_attributes": {"pcmk_delay_max": "15", "target-role": "Started"},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "15", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
"operations": {
- "monitor": {"timeout": ["700", "700s"], "interval": "10"},
- "start": {"timeout": "20"},
+ "monitor": {
+ "timeout": {"value": ["700", "700s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
},
- "instance_attributes": {"login": "testuser"},
+ "instance_attributes": {"login": {"value": "testuser", "required": False}},
},
"sbd_stonith": {
- "meta_attributes": {"pcmk_delay_max": "30", "target-role": "Started"},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "30", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
"operations": {
- "monitor": {"timeout": ["30", "30s"], "interval": "10"},
- "start": {"timeout": "20"},
+ "monitor": {
+ "timeout": {"value": ["30", "30s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
},
},
"test_resource": {
- "meta_attributes": {"clone-max": "2"},
- "operations": {"monitor": {"timeout": ["600", "600s"]}},
- "instance_attributes": {"SID": "HDB"},
+ "meta_attributes": {"clone-max": {"value": "2", "required": False}},
+ "operations": {
+ "monitor": {"timeout": {"value": ["600", "600s"], "required": False}}
+ },
+ "instance_attributes": {"SID": {"value": "HDB", "required": False}},
},
}
},
"OS_PARAMETERS": {
- "DEFAULTS": {"sysctl": {"kernel.numa_balancing": "kernel.numa_balancing = 0"}}
+ "DEFAULTS": {
+ "sysctl": {
+ "kernel.numa_balancing": {"value": "kernel.numa_balancing = 0", "required": False}
+ }
+ }
},
"CONSTRAINTS": {
- "rsc_location": {"score": "INFINITY"},
- "rsc_colocation": {"score": "4000"},
- "rsc_order": {"kind": "Optional"},
+ "rsc_location": {"score": {"value": "INFINITY", "required": False}},
+ "rsc_colocation": {"score": {"value": "4000", "required": False}},
+ "rsc_order": {"kind": {"value": "Optional", "required": False}},
},
}
@@ -254,7 +275,7 @@ def test_get_expected_value_fence_config(self, validator):
"""
validator.fencing_mechanism = "azure-fence-agent"
expected = validator._get_expected_value("crm_config", "priority")
- assert expected == "10"
+ assert expected == ("10", False)
def test_get_resource_expected_value_instance_attributes(self, validator):
"""
@@ -263,7 +284,7 @@ def test_get_resource_expected_value_instance_attributes(self, validator):
expected = validator._get_resource_expected_value(
"fence_agent", "instance_attributes", "login"
)
- assert expected == "testuser"
+ assert expected == ("testuser", False)
def test_get_resource_expected_value_invalid_section(self, validator):
"""
@@ -307,30 +328,16 @@ def test_determine_parameter_status_success_string(self, validator):
"""
Test _determine_parameter_status method with matching string values.
"""
- status = validator._determine_parameter_status("true", "true")
+ status = validator._determine_parameter_status("true", ("true", False))
assert status == TestStatus.SUCCESS.value
def test_determine_parameter_status_error_string(self, validator):
"""
Test _determine_parameter_status method with non-matching string values.
"""
- status = validator._determine_parameter_status("true", "false")
+ status = validator._determine_parameter_status("true", ("false", False))
assert status == TestStatus.ERROR.value
- def test_parse_basic_config(self, validator):
- """
- Test _parse_basic_config method.
- """
- xml_str = """
-
-
- """
- params = validator._parse_basic_config(
- ET.fromstring(xml_str), "crm_config", "test_subcategory"
- )
- assert len(params) == 2
- assert params[0]["category"] == "crm_config_test_subcategory"
-
def test_parse_resource_with_operations(self, validator):
"""
Test _parse_resource method with operations.
@@ -347,25 +354,6 @@ def test_parse_resource_with_operations(self, validator):
assert len(timeout_params) == 2
assert len(interval_params) == 2
- def test_parse_constraints(self, validator):
- """
- Test _parse_constraints method.
- """
- xml_str = """
-
-
-
-
- """
- root = ET.fromstring(xml_str)
- params = validator._parse_constraints(root)
- location_params = [p for p in params if "rsc_location" in p["category"]]
- colocation_params = [p for p in params if "rsc_colocation" in p["category"]]
- order_params = [p for p in params if "rsc_order" in p["category"]]
- assert len(location_params) >= 1
- assert len(colocation_params) >= 1
- assert len(order_params) >= 1
-
def test_parse_resources_section(self, validator):
"""
Test _parse_resources_section method.
@@ -410,15 +398,6 @@ def test_get_scope_from_cib_without_cib_output(self, validator):
scope_element = validator._get_scope_from_cib("resources")
assert scope_element is None
- def test_parse_ha_cluster_config_with_cib(self, validator_with_cib):
- """
- Test parse_ha_cluster_config method with CIB output.
- """
- validator_with_cib.parse_ha_cluster_config()
- result = validator_with_cib.get_result()
- assert result["status"] in [TestStatus.SUCCESS.value, TestStatus.ERROR.value]
- assert "parameters" in result["details"]
-
def test_get_expected_value_for_category_resource(self, validator):
"""
Test _get_expected_value_for_category method for resource category.
@@ -426,7 +405,7 @@ def test_get_expected_value_for_category_resource(self, validator):
expected = validator._get_expected_value_for_category(
"fence_agent", "meta_attributes", "pcmk_delay_max", None
)
- assert expected == "15"
+ assert expected == ("15", False)
def test_get_expected_value_for_category_basic(self, validator):
"""
@@ -435,26 +414,14 @@ def test_get_expected_value_for_category_basic(self, validator):
expected = validator._get_expected_value_for_category(
"crm_config", None, "stonith-enabled", None
)
- assert expected == "true"
+ assert expected == ("true", False)
- def test_determine_parameter_status_error_invalid_expected(self, validator):
+ def test_determine_parameter_status_with_required_parameter(self, validator):
"""
- Test _determine_parameter_status method with invalid expected value type.
+ Test _determine_parameter_status method with required parameter.
"""
- status = validator._determine_parameter_status("value", {"invalid": "dict"})
- assert status == TestStatus.ERROR.value
-
- def test_parse_constraints_skip_missing_attributes(self, validator):
- """
- Test _parse_constraints method skips elements with missing attributes.
- """
- xml_str = """
-
- """
- root = ET.fromstring(xml_str)
- params = validator._parse_constraints(root)
- score_params = [p for p in params if p["name"] == "score"]
- assert len(score_params) == 0
+ status = validator._determine_parameter_status("", ("expected_value", True))
+ assert status == TestStatus.WARNING.value
def test_get_scope_from_cib_invalid_scope(self, validator_with_cib):
"""
diff --git a/tests/modules/get_pcmk_properties_db_test.py b/tests/modules/get_pcmk_properties_db_test.py
index 135b7b7b..bdd75f7e 100644
--- a/tests/modules/get_pcmk_properties_db_test.py
+++ b/tests/modules/get_pcmk_properties_db_test.py
@@ -146,47 +146,90 @@
DUMMY_CONSTANTS = {
"VALID_CONFIGS": {
- "REDHAT": {"stonith-enabled": "true"},
- "azure-fence-agent": {"priority": "10"},
+ "REDHAT": {
+ "stonith-enabled": {"value": "true", "required": False},
+ "cluster-name": {"value": "hdb_HDB", "required": False},
+ },
+ "azure-fence-agent": {"priority": {"value": "10", "required": False}},
+ "sbd": {"pcmk_delay_max": {"value": "30", "required": False}},
},
"RSC_DEFAULTS": {
- "resource-stickiness": "1000",
- "migration-threshold": "5000",
+ "resource-stickiness": {"value": "1000", "required": False},
+ "migration-threshold": {"value": "5000", "required": False},
},
"OP_DEFAULTS": {
- "timeout": "600",
- "record-pending": "true",
+ "timeout": {"value": "600", "required": False},
+ "record-pending": {"value": "true", "required": False},
+ },
+ "CRM_CONFIG_DEFAULTS": {
+ "stonith-enabled": {"value": "true", "required": False},
+ "maintenance-mode": {"value": "false", "required": False},
},
- "CRM_CONFIG_DEFAULTS": {"stonith-enabled": "true"},
"RESOURCE_DEFAULTS": {
"REDHAT": {
"fence_agent": {
- "meta_attributes": {"pcmk_delay_max": "15"},
- "operations": {"monitor": {"timeout": ["700", "700s"]}},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "15", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
+ "operations": {
+ "monitor": {
+ "timeout": {"value": ["700", "700s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
+ },
+ "instance_attributes": {"login": {"value": "testuser", "required": False}},
},
"sbd_stonith": {
- "meta_attributes": {"pcmk_delay_max": "15"},
- "operations": {"monitor": {"timeout": ["30", "30s"]}},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "30", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
+ "operations": {
+ "monitor": {
+ "timeout": {"value": ["30", "30s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
+ },
+ },
+ "hana": {
+ "meta_attributes": {"clone-max": {"value": "2", "required": False}},
+ "operations": {
+ "monitor": {"timeout": {"value": ["600", "600s"], "required": False}}
+ },
+ "instance_attributes": {"SID": {"value": "HDB", "required": False}},
},
- "hana": {"meta_attributes": {"clone-max": "2"}},
}
},
"OS_PARAMETERS": {
- "DEFAULTS": {"sysctl": {"kernel.numa_balancing": "kernel.numa_balancing = 0"}}
+ "DEFAULTS": {
+ "sysctl": {
+ "kernel.numa_balancing": {"value": "kernel.numa_balancing = 0", "required": False}
+ }
+ }
},
"GLOBAL_INI": {
"REDHAT": {
"SAPHanaSR": {
- "provider": "SAPHanaSR",
- "path": "/usr/share/SAPHanaSR",
- "execution_order": ["1", "2"],
+ "provider": {"value": "SAPHanaSR", "required": False},
+ "path": {"value": "/usr/share/SAPHanaSR", "required": False},
+ "execution_order": {"value": ["1", "2"], "required": False},
}
},
"SUSE": {
- "SAPHanaSR-angi": {"provider": "SAPHanaSR-angi", "path": "/usr/share/SAPHanaSR-angi"}
+ "SAPHanaSR-angi": {
+ "provider": {"value": "SAPHanaSR-angi", "required": False},
+ "path": {"value": "/usr/share/SAPHanaSR-angi", "required": False},
+ }
},
},
- "CONSTRAINTS": {"rsc_location": {"score": "INFINITY"}},
+ "CONSTRAINTS": {
+ "rsc_location": {"score": {"value": "INFINITY", "required": False}},
+ "rsc_colocation": {"score": {"value": "4000", "required": False}},
+ "rsc_order": {"kind": {"value": "Optional", "required": False}},
+ },
}
@@ -552,26 +595,13 @@ def test_get_expected_value_methods(self, validator):
"""
validator.fencing_mechanism = "azure-fence-agent"
expected = validator._get_expected_value("crm_config", "priority")
- assert expected == "10"
+ assert expected == ("10", False)
expected = validator._get_expected_value("crm_config", "stonith-enabled")
- assert expected == "true"
+ assert expected == ("true", False)
expected = validator._get_resource_expected_value(
"fence_agent", "meta_attributes", "pcmk_delay_max"
)
- assert expected == "15"
-
- def test_parse_constraints_with_valid_constraints(self, validator):
- """
- Test _parse_constraints method with valid constraints.
- """
- xml_str = """
-
-
-
- """
- root = ET.fromstring(xml_str)
- params = validator._parse_constraints(root)
- assert len(params) > 0
+ assert expected == ("15", False)
def test_successful_validation_result(self, validator):
"""
diff --git a/tests/modules/get_pcmk_properties_scs_test.py b/tests/modules/get_pcmk_properties_scs_test.py
index a2f542c9..51bac43e 100644
--- a/tests/modules/get_pcmk_properties_scs_test.py
+++ b/tests/modules/get_pcmk_properties_scs_test.py
@@ -125,61 +125,96 @@
DUMMY_CONSTANTS = {
"VALID_CONFIGS": {
- "REDHAT": {"stonith-enabled": "true", "cluster-name": "scs_S4D"},
- "azure-fence-agent": {"priority": "10"},
- "sbd": {"pcmk_delay_max": "30"},
+ "REDHAT": {
+ "stonith-enabled": {"value": "true", "required": False},
+ "cluster-name": {"value": "scs_S4D", "required": False},
+ },
+ "azure-fence-agent": {"priority": {"value": "10", "required": False}},
+ "sbd": {"pcmk_delay_max": {"value": "30", "required": False}},
},
"RSC_DEFAULTS": {
- "resource-stickiness": "1000",
- "migration-threshold": "5000",
+ "resource-stickiness": {"value": "1000", "required": False},
+ "migration-threshold": {"value": "5000", "required": False},
},
"OP_DEFAULTS": {
- "timeout": "600",
- "record-pending": "true",
+ "timeout": {"value": "600", "required": False},
+ "record-pending": {"value": "true", "required": False},
},
"CRM_CONFIG_DEFAULTS": {
- "stonith-enabled": "true",
- "maintenance-mode": "false",
+ "stonith-enabled": {"value": "true", "required": False},
+ "maintenance-mode": {"value": "false", "required": False},
},
"RESOURCE_DEFAULTS": {
"REDHAT": {
"fence_agent": {
- "meta_attributes": {"pcmk_delay_max": "15", "target-role": "Started"},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "15", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
"operations": {
- "monitor": {"timeout": ["700", "700s"], "interval": "10"},
- "start": {"timeout": "20"},
+ "monitor": {
+ "timeout": {"value": ["700", "700s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
+ },
+ "instance_attributes": {
+ "login": {"value": "testuser", "required": False},
+ "resourceGroup": {"value": "test-rg", "required": False},
},
- "instance_attributes": {"login": "testuser", "resourceGroup": "test-rg"},
},
"sbd_stonith": {
- "meta_attributes": {"pcmk_delay_max": "30", "target-role": "Started"},
+ "meta_attributes": {
+ "pcmk_delay_max": {"value": "30", "required": False},
+ "target-role": {"value": "Started", "required": False},
+ },
"operations": {
- "monitor": {"timeout": ["30", "30s"], "interval": "10"},
- "start": {"timeout": "20"},
+ "monitor": {
+ "timeout": {"value": ["30", "30s"], "required": False},
+ "interval": {"value": "10", "required": False},
+ },
+ "start": {"timeout": {"value": "20", "required": False}},
},
},
"ascs": {
- "meta_attributes": {"target-role": "Started"},
- "operations": {"monitor": {"timeout": ["600", "600s"]}},
- "instance_attributes": {"InstanceName": "S4D_ASCS00_sapascs"},
+ "meta_attributes": {"target-role": {"value": "Started", "required": False}},
+ "operations": {
+ "monitor": {"timeout": {"value": ["600", "600s"], "required": False}}
+ },
+ "instance_attributes": {
+ "InstanceName": {"value": "S4D_ASCS00_sapascs", "required": False}
+ },
},
"ers": {
- "meta_attributes": {"target-role": "Started"},
- "operations": {"monitor": {"timeout": ["600", "600s"]}},
- "instance_attributes": {"InstanceName": "S4D_ERS10_sapers"},
+ "meta_attributes": {"target-role": {"value": "Started", "required": False}},
+ "operations": {
+ "monitor": {"timeout": {"value": ["600", "600s"], "required": False}}
+ },
+ "instance_attributes": {
+ "InstanceName": {"value": "S4D_ERS10_sapers", "required": False}
+ },
},
"ipaddr": {
- "instance_attributes": {"ip": {"AFS": ["10.0.1.100"], "ANF": ["10.0.1.101"]}}
+ "instance_attributes": {
+ "ip": {
+ "value": {"AFS": ["10.0.1.100"], "ANF": ["10.0.1.101"]},
+ "required": False,
+ }
+ }
},
}
},
"OS_PARAMETERS": {
- "DEFAULTS": {"sysctl": {"kernel.numa_balancing": "kernel.numa_balancing = 0"}}
+ "DEFAULTS": {
+ "sysctl": {
+ "kernel.numa_balancing": {"value": "kernel.numa_balancing = 0", "required": False}
+ }
+ }
},
"CONSTRAINTS": {
- "rsc_location": {"score": "INFINITY"},
- "rsc_colocation": {"score": "4000"},
- "rsc_order": {"kind": "Optional"},
+ "rsc_location": {"score": {"value": "INFINITY", "required": False}},
+ "rsc_colocation": {"score": {"value": "4000", "required": False}},
+ "rsc_order": {"kind": {"value": "Optional", "required": False}},
},
}
@@ -303,7 +338,7 @@ def test_get_expected_value_for_category_resource(self, validator):
expected = validator._get_expected_value_for_category(
"fence_agent", "meta_attributes", "pcmk_delay_max", None
)
- assert expected == "15"
+ assert expected == ("15", False)
def test_get_expected_value_for_category_ascs_ers(self, validator):
"""
@@ -312,11 +347,11 @@ def test_get_expected_value_for_category_ascs_ers(self, validator):
expected = validator._get_expected_value_for_category(
"ascs", "meta_attributes", "target-role", None
)
- assert expected == "Started"
+ assert expected == ("Started", False)
expected = validator._get_expected_value_for_category(
"ers", "meta_attributes", "target-role", None
)
- assert expected == "Started"
+ assert expected == ("Started", False)
def test_get_expected_value_for_category_basic(self, validator):
"""
@@ -325,15 +360,16 @@ def test_get_expected_value_for_category_basic(self, validator):
expected = validator._get_expected_value_for_category(
"crm_config", None, "stonith-enabled", None
)
- assert expected == "true"
+ assert expected == ("true", False)
- def test_determine_parameter_status_with_dict_expected_value_anf(self, validator_anf):
+ def test_determine_parameter_status_with_list_expected_value(self, validator):
"""
- Test _determine_parameter_status method with dict expected value and ANF provider.
+ Test _determine_parameter_status method with list expected value.
"""
- status = validator_anf._determine_parameter_status(
- "10.0.1.101", {"AFS": ["10.0.1.100"], "ANF": ["10.0.1.101"]}
+ status = validator._determine_parameter_status(
+ "10.0.1.101", (["10.0.1.100", "10.0.1.101"], False)
)
+ print(f"Actual status: {status}, Expected: {TestStatus.SUCCESS.value}")
assert status == TestStatus.SUCCESS.value
def test_determine_parameter_status_info_cases(self, validator):
@@ -465,25 +501,6 @@ def test_resource_categories_defined(self, validator):
assert category in HAClusterValidator.RESOURCE_CATEGORIES
assert HAClusterValidator.RESOURCE_CATEGORIES[category].startswith(".//")
- def test_parse_constraints_with_location_constraints(self, validator):
- """
- Test _parse_constraints method with location constraints.
- """
- xml_str = """
-
-
-
-
- """
- root = ET.fromstring(xml_str)
- params = validator._parse_constraints(root)
- location_params = [p for p in params if "rsc_location" in p["category"]]
- colocation_params = [p for p in params if "rsc_colocation" in p["category"]]
- order_params = [p for p in params if "rsc_order" in p["category"]]
- assert len(location_params) >= 1
- assert len(colocation_params) >= 1
- assert len(order_params) >= 1
-
def test_successful_validation_result(self, validator):
"""
Test that validator returns proper result structure.
@@ -518,20 +535,20 @@ def test_get_expected_value_methods_coverage(self, validator):
"""
validator.fencing_mechanism = "azure-fence-agent"
expected = validator._get_expected_value("crm_config", "priority")
- assert expected == "10"
+ assert expected == ("10", False)
expected = validator._get_expected_value("crm_config", "stonith-enabled")
- assert expected == "true"
+ assert expected == ("true", False)
expected = validator._get_resource_expected_value(
"fence_agent", "meta_attributes", "pcmk_delay_max"
)
- assert expected == "15"
+ assert expected == ("15", False)
expected = validator._get_resource_expected_value(
"fence_agent", "operations", "timeout", "monitor"
)
- assert expected == ["700", "700s"]
+ assert expected == (["700", "700s"], False)
expected = validator._get_resource_expected_value(
"fence_agent", "instance_attributes", "login"
)
- assert expected == "testuser"
+ assert expected == ("testuser", False)
expected = validator._get_resource_expected_value("fence_agent", "unknown_section", "param")
assert expected is None
From ff35e207f955034b0c60985b89ab485df3be4315 Mon Sep 17 00:00:00 2001
From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
Date: Tue, 30 Sep 2025 16:40:57 +0000
Subject: [PATCH 4/6] Restructure the documents for high availability testing
(#116)
---
.github/copilot-instructions.md | 67 ++++++++++++
...VALIDATION.md => HA_OFFLINE_VALIDATION.md} | 0
docs/HIGH_AVAILABILITY.md | 94 +---------------
docs/SETUP.MD | 101 ++++++++++++++++++
.../DB_HIGH_AVAILABILITY.md | 4 +-
.../SCS_HIGH_AVAILABILITY.md | 24 ++---
.../pseudocode/block-network.md | 0
.../pseudocode/crash-index.md | 0
.../pseudocode/echo-b.md | 0
.../pseudocode/fs-freeze.md | 0
.../pseudocode/ha-failover-to-node.md | 0
.../pseudocode/kill-message-server.md | 0
.../pseudocode/manual-restart.md | 0
.../pseudocode/node-crash.md | 0
.../pseudocode/node-kill.md | 0
.../pseudocode/resource-migration.md | 0
.../pseudocode/sapcontrol-config.md | 0
.../pseudocode/sbd-fencing.md | 0
18 files changed, 185 insertions(+), 105 deletions(-)
create mode 100644 .github/copilot-instructions.md
rename docs/{OFFLINE_VALIDATION.md => HA_OFFLINE_VALIDATION.md} (100%)
create mode 100644 docs/SETUP.MD
rename docs/{ => high_availability}/DB_HIGH_AVAILABILITY.md (96%)
rename docs/{ => high_availability}/SCS_HIGH_AVAILABILITY.md (78%)
rename docs/{ => high_availability}/pseudocode/block-network.md (100%)
rename docs/{ => high_availability}/pseudocode/crash-index.md (100%)
rename docs/{ => high_availability}/pseudocode/echo-b.md (100%)
rename docs/{ => high_availability}/pseudocode/fs-freeze.md (100%)
rename docs/{ => high_availability}/pseudocode/ha-failover-to-node.md (100%)
rename docs/{ => high_availability}/pseudocode/kill-message-server.md (100%)
rename docs/{ => high_availability}/pseudocode/manual-restart.md (100%)
rename docs/{ => high_availability}/pseudocode/node-crash.md (100%)
rename docs/{ => high_availability}/pseudocode/node-kill.md (100%)
rename docs/{ => high_availability}/pseudocode/resource-migration.md (100%)
rename docs/{ => high_availability}/pseudocode/sapcontrol-config.md (100%)
rename docs/{ => high_availability}/pseudocode/sbd-fencing.md (100%)
diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
new file mode 100644
index 00000000..61593f77
--- /dev/null
+++ b/.github/copilot-instructions.md
@@ -0,0 +1,67 @@
+# SAP Testing Automation Framework - Copilot Instructions
+
+## Project Context
+
+This is the SAP Testing Automation Framework—an open-source orchestration tool for validating SAP deployments on Microsoft Azure. The framework focuses on HA testing for SAP HANA Scale-Up and SAP Central Services in two-node Pacemaker clusters.
+
+### Key Technologies & Architecture
+- **Primary Stack**: Python 3.10+, Ansible, Azure CLI/APIs
+- **Target Environment**: SAP on Azure (SLES/RHEL clusters)
+- **Testing Focus**: HA functional testing, configuration validation, failover scenarios
+- **Structure**: Modular design with separate modules, roles, and utilities
+- **Standards**: pytest for testing, pylint/black for code quality, 85% code coverage requirement
+
+### Project Structure Understanding
+- `src/`: Core framework code (Ansible modules, playbooks, utilities)
+- `tests/`: Comprehensive pytest test suite
+- `docs/`: Architecture and integration documentation
+- `WORKSPACES/`: System-specific configurations and credentials
+- Key files: `pyproject.toml` (project config), Ansible playbooks for HA testing
+
+### Enterprise-Grade & OOP Defaults (mandatory)
+
+#### Enterprise-grade by default. No compromises.
+
+- Production-ready code: safe defaults, clear failure modes, strict typing, deterministic behavior.
+- Observability: structured logging, metrics hooks, and trace-friendly correlation IDs.
+- Resilience: timeouts, bounded retries with jitter/backoff, idempotency, and circuit-breaker patterns.
+- Security: least privilege, no plaintext secrets, input validation, deny-by-default.
+- Performance hygiene: avoid needless subprocess calls, batch remote ops, reduce SSH/chatty loops.
+
+##### Object-Oriented mindset for every answer and artifact.
+- Favor well-named classes with SRP, clear interfaces, and dependency inversion.
+- Encapsulate external systems (Azure, OS, Ansible runner) behind ports/adapters.
+- Model states and workflows as explicit types; avoid “stringly typed” protocols.
+- Provide seams for testing via interfaces and small, mockable collaborators.
+
+## Coding Partnership Rules
+
+Follow these rules at all times:
+
+1. **Be critical, not agreeable**:
+ - Do not just follow assumptions. Flag missing context and risky design choices.
+ - Provide counterpoints/alternatives, esp. for SAP/Azure specifics that look wrong.
+
+2. **Apply best design principles**:
+ - SOLID, DRY, KISS, clear separation of concerns.
+ - Maintainability > cleverness. Small units > god-objects.
+ - Production SAP constraints: reliability, observability, rollback plans, and operability.
+
+3. **Cover edge cases**:
+ - Empty/invalid inputs, boundary conditions, transient Azure failures, partial cluster outages,
+     quorum loss, fencing misconfig, split-brain, storage throttling, DNS/MI/IMDS hiccups.
+
+4. **Output style**:
+ - Concise. Minimal yet complete code. Black-formatted, pylint-clean, ≤100-char lines.
+ - Include types, docstrings, explicit exceptions. Show tests when relevant.
+
+5. **Collaboration stance**:
+ - Act as a Principal software reviewer. Push back on weak requests or ambiguous scope.
+ - Offer 2–3 viable designs when trade-offs exist, with crisp pros/cons.
+
+## Project-Specific Guidance
+
+- **Ansible Modules**: Follow the existing module pattern with proper error handling and result objects
+- **Testing**: Maintain 85% code coverage, use pytest fixtures effectively
+- **SAP Context**: Understand HA requirements, cluster behavior, and Azure integration points
+- **Documentation**: Update relevant docs when making architectural changes
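The resilience defaults called out above (timeouts, bounded retries with jitter/backoff) can be made concrete with a short sketch; the helper below is hypothetical and not part of the repository, shown only to illustrate the guideline:

```python
import random
import time
from typing import Callable, Tuple, Type, TypeVar

T = TypeVar("T")


def call_with_retries(
    func: Callable[[], T],
    retryable: Tuple[Type[Exception], ...] = (TimeoutError, ConnectionError),
    max_attempts: int = 4,
    base_delay: float = 1.0,
    max_delay: float = 30.0,
) -> T:
    """Call func with bounded retries, exponential backoff, and full jitter."""
    for attempt in range(1, max_attempts + 1):
        try:
            return func()
        except retryable:
            if attempt == max_attempts:
                raise  # bounded: give up after the last attempt
            # Exponential backoff capped at max_delay, with full jitter to avoid retry storms.
            delay = random.uniform(0, min(max_delay, base_delay * 2 ** (attempt - 1)))
            time.sleep(delay)
    raise RuntimeError("unreachable")
```

Full jitter keeps concurrent callers from retrying in lockstep, and the attempt bound guarantees the call eventually fails loudly instead of hanging.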
diff --git a/docs/OFFLINE_VALIDATION.md b/docs/HA_OFFLINE_VALIDATION.md
similarity index 100%
rename from docs/OFFLINE_VALIDATION.md
rename to docs/HA_OFFLINE_VALIDATION.md
diff --git a/docs/HIGH_AVAILABILITY.md b/docs/HIGH_AVAILABILITY.md
index 111c45cf..924f6c43 100644
--- a/docs/HIGH_AVAILABILITY.md
+++ b/docs/HIGH_AVAILABILITY.md
@@ -28,19 +28,9 @@ Currently SAP Testing Automation Framework is supported for below Linux distros
For SAP Central Services on SLES, both the simple mount approach and the classic method are supported.
-## Technical Requirements for running Automation Framework
-
-To run the SAP Testing Automation Framework, you must meet certain prerequisites and follow techincal requirements.
-
-### SAP System Deployment on Microsoft Azure
-
-- The SAP system must be hosted on Microsoft Azure Infrastructure-as-a-Service (IaaS).
-- The SAP system deploymed should follow SAP on Azure best practices as outlined in:
- - [SAP HANA high availability on Azure Virtual Machine](https://learn.microsoft.com/azure/sap/workloads/sap-high-availability-guide-start).
- - [SAP Netweaver high availability on Azure Virtual Machine](https://learn.microsoft.com/azure/sap/workloads/sap-high-availability-guide-start)
### Enabling Cluster Services on Boot
-
+
Before executing the tests, ensure that the cluster services are configured to start automatically during system boot. Run the following command on one of the cluster nodes to enable this setting. The `--all` option ensures that the cluster services are enabled on all nodes within the cluster.
```bash
@@ -48,87 +38,9 @@ crm cluster enable --all # for SUSE virtual machines
pcs cluster enable --all # for RedHat virtual machine
```
-### Management server
-
-The SAP Testing Automation Framework requires a jumpbox or management server with the following setup:
-
-- **Operating System**: Supported (Ubuntu 22.04 LTS, SLES 15 SP4, 15 SP6).
-- **Location**: Must be deployed on Azure.
-
-### Azure RBAC
-
-For the framework to access the properties of the Azure Load Balancer in a high availability SAP system on Azure, the management server must have a Reader role assigned to the Load Balancer. This can be done using either a system-assigned or user-assigned managed identity.
-
-#### Configuring access using system-assigned managed identity
-
-1. Enable system managed identity on the management server by following the steps in [Configure managed identities on Azure VMs](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-configure-managed-identities?pivots=qs-configure-portal-windows-vm#system-assigned-managed-identity).
-1. Open the Azure Load Balancer used for the high availability deployment of your SAP system on Azure.
-1. In the Azure Load Balancer panel, go to Access control (IAM).
-1. Follow steps from [Use managed identity to access Azure Resource](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal) to complete the configuration.
-
-#### Configuring access using user-assigned managed identity
-
-1. Create user-assigned managed identity as described in [manage user-assigned managed identities](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-manage-user-assigned-managed-identities?pivots=identity-mi-methods-azp#create-a-user-assigned-managed-identity)
-1. Assign user-assigned managed identity to management server as described in [configure managed identities on Azure VMs](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-configure-managed-identities?pivots=qs-configure-portal-windows-vm#assign-a-user-assigned-managed-identity-to-an-existing-vm)
-1. Open the Azure Load Balancer used for the high availability deployment of your SAP system on Azure.
-1. In the Azure Load Balancer panel, go to Access control (IAM).
-1. Assign the required role to the user-assigned managed identity by following the steps in [assign roles using Azure portal](https://learn.microsoft.com/azure/role-based-access-control/role-assignments-portal).
-
-### Network Connectivity
-
-The management server must have network connectivity to the SAP system to perform tests and validations. You can establish this connection by peering the networks as outlined in [manage a virtual network peering](https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-peering?tabs=peering-portal).
-
-### Analytics Integration (optional)
-
-- **Analytics Integration** [Telemetry Setup Information](./TELEMETRY_SETUP.md)
- - Azure Log Analytics
- - Azure Data Explorer
-
-## Getting Started
-
-### 1. Environment Setup
-
-To set up your environment in management server, follow these steps:
+### 1. Setup Configuration
-1.1. **Login to the management server**:
-
-Ensure you are logged into the management server that is connected to the SAP system's virtual network.
-
-1.2. **Install git on management server**:
-
-```bash
-# Debian/Ubuntu
-sudo su -
-apt-get install git
-
-# RHEL/CentOS
-sudo su -
-yum install git
-
-# SUSE
-sudo su -
-zypper install git
-```
-
-1.3. **Fork and clone the repository**:
-
-```bash
-# sudo to root
-sudo su -
-
-# First, visit https://github.com/Azure/sap-automation-qa in your browser
-# Click the "Fork" button in the top-right corner to create a fork in your GitHub account
-
-# Clone your fork of the repository (replace GITHUB-USERNAME with your GitHub username)
-git clone https://github.com/GITHUB-USERNAME/sap-automation-qa.git
-cd sap-automation-qa
-```
-
-1.4. **Run the initial setup script**:
-
-```bash
-./scripts/setup.sh
-```
+Follow the steps in [Setup Guide for SAP Testing Automation Framework](./SETUP.MD) to set up the framework on a management server.
### 2. Configuration
diff --git a/docs/SETUP.MD b/docs/SETUP.MD
new file mode 100644
index 00000000..2b12804b
--- /dev/null
+++ b/docs/SETUP.MD
@@ -0,0 +1,101 @@
+
+# Setup Guide for SAP Testing Automation Framework
+
+## Technical Requirements for running Automation Framework
+
+To run the SAP Testing Automation Framework, you must meet certain prerequisites and follow technical requirements.
+
+### SAP System Deployment on Microsoft Azure
+
+- The SAP system must be hosted on Microsoft Azure Infrastructure-as-a-Service (IaaS).
+- The SAP system deployed should follow SAP on Azure best practices as outlined in:
+ - [SAP HANA high availability on Azure Virtual Machine](https://learn.microsoft.com/azure/sap/workloads/sap-high-availability-guide-start).
+ - [SAP Netweaver high availability on Azure Virtual Machine](https://learn.microsoft.com/azure/sap/workloads/sap-high-availability-guide-start)
+
+### Management server
+
+The SAP Testing Automation Framework requires a jumpbox or management server with the following setup:
+
+- **Operating System**: Supported distributions are Ubuntu 22.04 LTS, SLES 15 SP4, SLES 15 SP6, and RedHat 9.4.
+- **Location**: Must be deployed on Azure.
+
+### Azure RBAC
+
+For the framework to access the properties of the Azure Load Balancer in a high availability SAP system on Azure, the management server must be assigned the Reader role on the Load Balancer. This can be done using either a system-assigned or user-assigned managed identity.
+
+#### Configuring access using system-assigned managed identity
+
+1. Enable system managed identity on the management server by following the steps in [Configure managed identities on Azure VMs](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-configure-managed-identities?pivots=qs-configure-portal-windows-vm#system-assigned-managed-identity).
+1. Open the Azure Load Balancer used for the high availability deployment of your SAP system on Azure.
+1. In the Azure Load Balancer panel, go to Access control (IAM).
+1. Follow steps from [Use managed identity to access Azure Resource](https://learn.microsoft.com/en-us/azure/role-based-access-control/role-assignments-portal) to complete the configuration.
+
+#### Configuring access using user-assigned managed identity
+
+1. Create user-assigned managed identity as described in [manage user-assigned managed identities](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-manage-user-assigned-managed-identities?pivots=identity-mi-methods-azp#create-a-user-assigned-managed-identity)
+1. Assign user-assigned managed identity to management server as described in [configure managed identities on Azure VMs](https://learn.microsoft.com/entra/identity/managed-identities-azure-resources/how-to-configure-managed-identities?pivots=qs-configure-portal-windows-vm#assign-a-user-assigned-managed-identity-to-an-existing-vm)
+1. Open the Azure Load Balancer used for the high availability deployment of your SAP system on Azure.
+1. In the Azure Load Balancer panel, go to Access control (IAM).
+1. Assign the required role to the user-assigned managed identity by following the steps in [assign roles using Azure portal](https://learn.microsoft.com/azure/role-based-access-control/role-assignments-portal).
+
+### Network Connectivity
+
+The management server must have network connectivity to the SAP system to perform tests and validations. You can establish this connection by peering the networks as outlined in [manage a virtual network peering](https://learn.microsoft.com/azure/virtual-network/virtual-network-manage-peering?tabs=peering-portal).
+
+### Analytics Integration (optional)
+
+- **Analytics Integration** [Telemetry Setup Information](./TELEMETRY_SETUP.md)
+ - Azure Log Analytics
+ - Azure Data Explorer
+
+## Getting Started
+
+### 1. Environment Setup
+
+To set up your environment on the management server, follow these steps:
+
+1.1. **Login to the management server**:
+
+Ensure you are logged into the management server that is connected to the SAP system's virtual network.
+
+1.2. **Install git on management server**:
+
+```bash
+# Debian/Ubuntu
+sudo su -
+apt-get install git
+
+# RHEL/CentOS
+sudo su -
+yum install git
+
+# SUSE
+sudo su -
+zypper install git
+```
+
+1.3. **Fork and clone the repository**:
+
+```bash
+# sudo to root
+sudo su -
+
+# First, visit https://github.com/Azure/sap-automation-qa in your browser
+# Click the "Fork" button in the top-right corner to create a fork in your GitHub account
+
+# Clone your fork of the repository (replace GITHUB-USERNAME with your GitHub username)
+git clone https://github.com/GITHUB-USERNAME/sap-automation-qa.git
+cd sap-automation-qa
+```
+
+1.4. **Run the initial setup script**:
+
+```bash
+./scripts/setup.sh
+```
+
+1.5. **Activate the python environment**:
+
+```bash
+source .venv/bin/activate
+```
diff --git a/docs/DB_HIGH_AVAILABILITY.md b/docs/high_availability/DB_HIGH_AVAILABILITY.md
similarity index 96%
rename from docs/DB_HIGH_AVAILABILITY.md
rename to docs/high_availability/DB_HIGH_AVAILABILITY.md
index 2f738269..f22c1fff 100644
--- a/docs/DB_HIGH_AVAILABILITY.md
+++ b/docs/high_availability/DB_HIGH_AVAILABILITY.md
@@ -4,8 +4,8 @@
| Test Case | Type | Description | More Info |
|-----------|-----------|-------------| --------- |
-| HA Parameters Validation | Configuration | The HA parameter validation test validates HA configuration including Corosync settings, Pacemaker resources, SBD device configuration, and HANA system replication setup. | [ha-config.yml](../src/roles/ha_db_hana/tasks/ha-config.yml) |
-| Azure Load Balancer | Configuration | The Azure LB configuration test validates Azure Load Balancer setup including health probe configuration, backend pool settings, load balancing rules, and frontend IP configuration. | [azure-lb.yml](../src/roles/ha_db_hana/tasks/azure-lb.yml) |
+| HA Parameters Validation | Configuration | The HA parameter validation test validates HA configuration including Corosync settings, Pacemaker resources, SBD device configuration, and HANA system replication setup. | [ha-config.yml](../../src/roles/ha_db_hana/tasks/ha-config.yml) |
+| Azure Load Balancer | Configuration | The Azure LB configuration test validates Azure Load Balancer setup including health probe configuration, backend pool settings, load balancing rules, and frontend IP configuration. | [azure-lb.yml](../../src/roles/ha_db_hana/tasks/azure-lb.yml) |
| Resource Migration | Failover | The Resource Migration test validates planned failover scenarios by executing controlled resource movement between HANA nodes. It performs a graceful migration of the primary HANA resources to the secondary node, verifies proper role changes, ensures cluster maintains stability throughout the transition, and validates complete data synchronization after migration. | [resource-migration.md](./pseudocode/resource-migration.md) |
| Primary Node Crash | Failover | The Primary Index Server Crash test simulates cluster behavior when the HANA index server crashes on the primary node. It simulates an index server failure by forcefully terminating the process, then verifies automatic failover to the secondary node, monitors system replication status, and confirms service recovery without data loss. | [node-crash.md](./pseudocode/node-crash.md) |
| Block Network | Network | The Block Network test validates cluster behavior during network partition scenarios by implementing iptables rules to block communication between primary and secondary HANA nodes. It verifies split-brain prevention mechanisms, validates proper failover execution when nodes become isolated, and ensures cluster stability and data consistency after network connectivity is restored. | [block-network.md](./pseudocode/block-network.md) |
diff --git a/docs/SCS_HIGH_AVAILABILITY.md b/docs/high_availability/SCS_HIGH_AVAILABILITY.md
similarity index 78%
rename from docs/SCS_HIGH_AVAILABILITY.md
rename to docs/high_availability/SCS_HIGH_AVAILABILITY.md
index 471ebb45..cfc1defc 100644
--- a/docs/SCS_HIGH_AVAILABILITY.md
+++ b/docs/high_availability/SCS_HIGH_AVAILABILITY.md
@@ -4,15 +4,15 @@
| Test Case | Type | Description | More Info |
|------------------------------|---------------|-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------|
-| HA Parameters Validation | Configuration | The HA parameter validation test validates HA configuration including Corosync settings, Pacemaker resources, SBD device configuration, and SCS system replication setup. | [ha-config.yml](../src/roles/ha_scs/tasks/ha-config.yml) |
-| Azure Load Balancer | Configuration | The Azure LB configuration test validates Azure Load Balancer setup including health probe configuration, backend pool settings, load balancing rules, and frontend IP configuration. | [azure-lb.yml](../src/roles/ha_scs/tasks/azure-lb.yml) |
-| SAPControl Config Validation | Configuration | The SAPControl Config Validation test runs multiple sapcontrol commands to validate the SCS configuration. It executes commands like HAGetFailoverConfig, HACheckFailoverConfig, and HACheckConfig, capturing their outputs and statuses to ensure proper configuration and functionality. | [sapcontrol-config.yml](../src/roles/ha_scs/tasks/sapcontrol-config.yml) |
-| Resource Migration | Failover | The Resource Migration test validates planned failover scenarios by controlling resource movement between SCS nodes, ensuring proper role changes. | [ascs-migration.yml](../src/roles/ha_scs/tasks/ascs-migration.yml) |
-| ASCS Node Crash | Failover | The ASCS Node Crash test simulates cluster behavior when the ASCS node crashes. It simulates an ASCS node failure by forcefully terminating the process, then verifies automatic failover to the ERS node, monitors system replication status, and confirms service recovery. | [ascs-node-crash.yml](../src/roles/ha_scs/tasks/ascs-node-crash.yml) |
-| Block Network Communication | Network | The Block Network test validates cluster behavior during network partition scenarios by implementing iptables rules to block communication between ASCS and ERS nodes. It verifies split-brain prevention mechanisms, validates proper failover execution when nodes become isolated, and ensures cluster stability after network connectivity is restored. | [block-network.yml](../src/roles/ha_scs/tasks/block-network.yml) |
-| Kill Message Server Process | Process | The Message Server Process Kill test simulates failure of the message server process on the ASCS node by forcefully terminating it using the kill -9 signal. It verifies proper cluster reaction, automatic failover to the ERS node, and ensures service continuity after the process failure. | [kill-message-server.yml](../src/roles/ha_scs/tasks/kill-message-server.yml) |
-| Kill Enqueue Server Process | Process | The Enqueue Server Process Kill test simulates failure of the enqueue server process on the ASCS node by forcefully terminating it using the kill -9 signal. It validates proper cluster behavior, automatic failover execution. | [kill-enqueue-server.yml](../src/roles/ha_scs/tasks/kill-enqueue-server.yml) |
-| Kill Enqueue Replication Server Process | Process | The Enqueue Replication Server Process Kill test simulates failure of the replication server process on the ERS node by forcefully terminating it using the kill -9 signal. This test handles both ENSA1 and ENSA2 architectures. It validates the automatic restart of the process. | [kill-enqueue-replication.yml](../src/roles/ha_scs/tasks/kill-enqueue-replication.yml) |
-| Kill sapstartsrv Process for ASCS | Process | The sapstartsrv Process Kill test simulates failure of the SAP Start Service for the ASCS instance by forcefully terminating it using the kill -9 signal. It validates proper cluster reaction, automatic failover to the ERS node, and verifies service restoration after the process failure. | [kill-sapstartsrv.yml](../src/roles/ha_scs/tasks/kill-sapstartsrv.yml) |
-| Manual Restart of ASCS Instance | Control | The Manual Restart test validates cluster behavior when the ASCS instance is manually stopped using sapcontrol. It verifies proper cluster reaction to a controlled instance shutdown, ensures automatic failover to the ERS node, and confirms service continuity throughout the operation. | [manual-restart.yml](../src/roles/ha_scs/tasks/manual-restart.yml) |
-| HAFailoverToNode Test | Control | The HAFailoverToNode test validates SAP's built-in high availability functionality by using the sapcontrol command to trigger a controlled failover. It executes 'HAFailoverToNode' as the SAP administrator user, which initiates a clean migration of the ASCS instance to another node. | [ha-failover-to-node.yml](../src/roles/ha_scs/tasks/ha-failover-to-node.yml) |
+| HA Parameters Validation | Configuration | The HA parameter validation test validates HA configuration including Corosync settings, Pacemaker resources, SBD device configuration, and SCS system replication setup. | [ha-config.yml](../../src/roles/ha_scs/tasks/ha-config.yml) |
+| Azure Load Balancer | Configuration | The Azure LB configuration test validates Azure Load Balancer setup including health probe configuration, backend pool settings, load balancing rules, and frontend IP configuration. | [azure-lb.yml](../../src/roles/ha_scs/tasks/azure-lb.yml) |
+| SAPControl Config Validation | Configuration | The SAPControl Config Validation test runs multiple sapcontrol commands to validate the SCS configuration. It executes commands like HAGetFailoverConfig, HACheckFailoverConfig, and HACheckConfig, capturing their outputs and statuses to ensure proper configuration and functionality. | [sapcontrol-config.yml](../../src/roles/ha_scs/tasks/sapcontrol-config.yml) |
+| Resource Migration | Failover | The Resource Migration test validates planned failover scenarios by controlling resource movement between SCS nodes, ensuring proper role changes. | [ascs-migration.yml](../../src/roles/ha_scs/tasks/ascs-migration.yml) |
+| ASCS Node Crash | Failover | The ASCS Node Crash test simulates cluster behavior when the ASCS node crashes. It simulates an ASCS node failure by forcefully terminating the process, then verifies automatic failover to the ERS node, monitors system replication status, and confirms service recovery. | [ascs-node-crash.yml](../../src/roles/ha_scs/tasks/ascs-node-crash.yml) |
+| Block Network Communication | Network | The Block Network test validates cluster behavior during network partition scenarios by implementing iptables rules to block communication between ASCS and ERS nodes. It verifies split-brain prevention mechanisms, validates proper failover execution when nodes become isolated, and ensures cluster stability after network connectivity is restored. | [block-network.yml](../../src/roles/ha_scs/tasks/block-network.yml) |
+| Kill Message Server Process | Process | The Message Server Process Kill test simulates failure of the message server process on the ASCS node by forcefully terminating it using the kill -9 signal. It verifies proper cluster reaction, automatic failover to the ERS node, and ensures service continuity after the process failure. | [kill-message-server.yml](../../src/roles/ha_scs/tasks/kill-message-server.yml) |
+| Kill Enqueue Server Process | Process | The Enqueue Server Process Kill test simulates failure of the enqueue server process on the ASCS node by forcefully terminating it using the kill -9 signal. It validates proper cluster behavior and automatic failover execution. | [kill-enqueue-server.yml](../../src/roles/ha_scs/tasks/kill-enqueue-server.yml) |
+| Kill Enqueue Replication Server Process | Process | The Enqueue Replication Server Process Kill test simulates failure of the replication server process on the ERS node by forcefully terminating it using the kill -9 signal. This test handles both ENSA1 and ENSA2 architectures. It validates the automatic restart of the process. | [kill-enqueue-replication.yml](../../src/roles/ha_scs/tasks/kill-enqueue-replication.yml) |
+| Kill sapstartsrv Process for ASCS | Process | The sapstartsrv Process Kill test simulates failure of the SAP Start Service for the ASCS instance by forcefully terminating it using the kill -9 signal. It validates proper cluster reaction, automatic failover to the ERS node, and verifies service restoration after the process failure. | [kill-sapstartsrv.yml](../../src/roles/ha_scs/tasks/kill-sapstartsrv.yml) |
+| Manual Restart of ASCS Instance | Control | The Manual Restart test validates cluster behavior when the ASCS instance is manually stopped using sapcontrol. It verifies proper cluster reaction to a controlled instance shutdown, ensures automatic failover to the ERS node, and confirms service continuity throughout the operation. | [manual-restart.yml](../../src/roles/ha_scs/tasks/manual-restart.yml) |
+| HAFailoverToNode Test | Control | The HAFailoverToNode test validates SAP's built-in high availability functionality by using the sapcontrol command to trigger a controlled failover. It executes 'HAFailoverToNode' as the SAP administrator user, which initiates a clean migration of the ASCS instance to another node. | [ha-failover-to-node.yml](../../src/roles/ha_scs/tasks/ha-failover-to-node.yml) |
diff --git a/docs/pseudocode/block-network.md b/docs/high_availability/pseudocode/block-network.md
similarity index 100%
rename from docs/pseudocode/block-network.md
rename to docs/high_availability/pseudocode/block-network.md
diff --git a/docs/pseudocode/crash-index.md b/docs/high_availability/pseudocode/crash-index.md
similarity index 100%
rename from docs/pseudocode/crash-index.md
rename to docs/high_availability/pseudocode/crash-index.md
diff --git a/docs/pseudocode/echo-b.md b/docs/high_availability/pseudocode/echo-b.md
similarity index 100%
rename from docs/pseudocode/echo-b.md
rename to docs/high_availability/pseudocode/echo-b.md
diff --git a/docs/pseudocode/fs-freeze.md b/docs/high_availability/pseudocode/fs-freeze.md
similarity index 100%
rename from docs/pseudocode/fs-freeze.md
rename to docs/high_availability/pseudocode/fs-freeze.md
diff --git a/docs/pseudocode/ha-failover-to-node.md b/docs/high_availability/pseudocode/ha-failover-to-node.md
similarity index 100%
rename from docs/pseudocode/ha-failover-to-node.md
rename to docs/high_availability/pseudocode/ha-failover-to-node.md
diff --git a/docs/pseudocode/kill-message-server.md b/docs/high_availability/pseudocode/kill-message-server.md
similarity index 100%
rename from docs/pseudocode/kill-message-server.md
rename to docs/high_availability/pseudocode/kill-message-server.md
diff --git a/docs/pseudocode/manual-restart.md b/docs/high_availability/pseudocode/manual-restart.md
similarity index 100%
rename from docs/pseudocode/manual-restart.md
rename to docs/high_availability/pseudocode/manual-restart.md
diff --git a/docs/pseudocode/node-crash.md b/docs/high_availability/pseudocode/node-crash.md
similarity index 100%
rename from docs/pseudocode/node-crash.md
rename to docs/high_availability/pseudocode/node-crash.md
diff --git a/docs/pseudocode/node-kill.md b/docs/high_availability/pseudocode/node-kill.md
similarity index 100%
rename from docs/pseudocode/node-kill.md
rename to docs/high_availability/pseudocode/node-kill.md
diff --git a/docs/pseudocode/resource-migration.md b/docs/high_availability/pseudocode/resource-migration.md
similarity index 100%
rename from docs/pseudocode/resource-migration.md
rename to docs/high_availability/pseudocode/resource-migration.md
diff --git a/docs/pseudocode/sapcontrol-config.md b/docs/high_availability/pseudocode/sapcontrol-config.md
similarity index 100%
rename from docs/pseudocode/sapcontrol-config.md
rename to docs/high_availability/pseudocode/sapcontrol-config.md
diff --git a/docs/pseudocode/sbd-fencing.md b/docs/high_availability/pseudocode/sbd-fencing.md
similarity index 100%
rename from docs/pseudocode/sbd-fencing.md
rename to docs/high_availability/pseudocode/sbd-fencing.md
From 60656ed5df34a1f5ee2b6ea7401294274c5ba553 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Wed, 1 Oct 2025 11:14:04 -0700
Subject: [PATCH 5/6] Bump pandas from 2.3.1 to 2.3.3 (#118)
Bumps [pandas](https://github.com/pandas-dev/pandas) from 2.3.1 to 2.3.3.
- [Release notes](https://github.com/pandas-dev/pandas/releases)
- [Commits](https://github.com/pandas-dev/pandas/compare/v2.3.1...v2.3.3)
---
updated-dependencies:
- dependency-name: pandas
dependency-version: 2.3.3
dependency-type: direct:production
update-type: version-update:semver-patch
...
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
---
requirements.txt | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index bd3a4766..db4736b5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -144,7 +144,7 @@ packaging==25.0
# ansible-runner
# black
# pytest
-pandas==2.3.2
+pandas==2.3.3
# via -r requirements.in
pathspec==0.12.1
# via
From b1b0f53b38e1a5dc587841d5320c7273e323dfad Mon Sep 17 00:00:00 2001
From: Devansh Jain <86314060+devanshjainms@users.noreply.github.com>
Date: Fri, 3 Oct 2025 09:34:56 -0700
Subject: [PATCH 6/6] Updated commands to get the automated-register and
priority fencing delay (#122)
* Add provider value resolution and enhance parameter status determination logic
* Refactor cluster parameter retrieval logic and update related tests
* Refactor command imports and update cluster status retrieval logic to use new parameters
* Refactor HANA resource handling by renaming parameters and updating related logic across multiple files
* Refactor command execution logic to simplify parameter value retrieval
* Fix typo in method name and update related test cases for cluster parameter retrieval
* Fix inconsistent quote style in test case for cluster parameter retrieval
* Update cluster parameter retrieval to include PRIORITY_FENCING_DELAY and fix typo in test case name
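For reference, a minimal standalone sketch of the retrieval flow this change introduces. The command builders mirror the definitions added to src/module_utils/commands.py in this patch; the helper function name, the primitive-over-clone fallback order, and the sample resource names are illustrative only and not part of the module API:

import subprocess

# Command builders mirroring src/module_utils/commands.py in this patch.
AUTOMATED_REGISTER = lambda rsc: [
    "crm_resource",
    "--resource",
    rsc,
    "--get-parameter=AUTOMATED_REGISTER",
]
PRIORITY_FENCING_DELAY = [
    "crm_attribute",
    "--type",
    "crm_config",
    "--name",
    "priority-fencing-delay",
    "--quiet",
]

def get_cluster_parameters(primitive_rsc, clone_rsc):
    """Return AUTOMATED_REGISTER and PRIORITY_FENCING_DELAY, or 'unknown' on failure."""
    commands = {
        # Prefer the primitive resource name; fall back to the clone resource name.
        "AUTOMATED_REGISTER": AUTOMATED_REGISTER(primitive_rsc or clone_rsc),
        "PRIORITY_FENCING_DELAY": PRIORITY_FENCING_DELAY,
    }
    result = {}
    for name, cmd in commands.items():
        try:
            result[name] = subprocess.check_output(cmd, text=True).strip()
        except Exception:
            result[name] = "unknown"
    return result

if __name__ == "__main__":
    # Hypothetical resource names; in the role they come from cibadmin queries.
    print(get_cluster_parameters("rsc_SAPHana_HDB_HDB00", "msl_SAPHana_HDB_HDB00"))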
---
src/module_utils/commands.py | 18 ++-
src/modules/get_cluster_status_db.py | 46 +++---
src/modules/get_pcmk_properties_scs.py | 135 ++++++++++++------
src/roles/ha_db_hana/tasks/block-network.yml | 16 ++-
.../ha_db_hana/tasks/files/constants.yaml | 1 -
src/roles/ha_db_hana/tasks/fs-freeze.yml | 6 +-
.../ha_db_hana/tasks/primary-crash-index.yml | 9 +-
src/roles/ha_db_hana/tasks/primary-echo-b.yml | 9 +-
.../ha_db_hana/tasks/primary-node-crash.yml | 6 +-
.../ha_db_hana/tasks/primary-node-kill.yml | 9 +-
.../ha_db_hana/tasks/resource-migration.yml | 36 ++---
src/roles/ha_db_hana/tasks/sbd-fencing.yml | 6 +-
.../tasks/secondary-crash-index.yml | 6 +-
.../ha_db_hana/tasks/secondary-echo-b.yml | 6 +-
.../ha_db_hana/tasks/secondary-node-kill.yml | 6 +-
src/roles/misc/tasks/cluster-report.yml | 3 +-
src/roles/misc/tasks/pre-validations-db.yml | 55 ++++---
src/vars/input-api.yaml | 6 +-
tests/modules/get_cluster_status_db_test.py | 23 +--
tests/modules/get_pcmk_properties_scs_test.py | 1 -
tests/roles/ha_db_hana/block_network_test.py | 6 +-
.../roles/ha_db_hana/primary_node_ops_test.py | 6 +-
.../ha_db_hana/resource_migration_test.py | 8 +-
.../ha_db_hana/secondary_node_ops_test.py | 6 +-
.../roles/mock_data/get_cluster_status_db.txt | 12 +-
.../secondary_get_cluster_status_db.txt | 12 +-
26 files changed, 301 insertions(+), 152 deletions(-)
diff --git a/src/module_utils/commands.py b/src/module_utils/commands.py
index c3aa5fb9..93ee3cc2 100644
--- a/src/module_utils/commands.py
+++ b/src/module_utils/commands.py
@@ -30,13 +30,21 @@
OperatingSystemFamily.SUSE: ["crm", "configure", "get_property", "stonith-action"],
}
-AUTOMATED_REGISTER = [
- "cibadmin",
- "--query",
- "--xpath",
- "//nvpair[@name='AUTOMATED_REGISTER']",
+AUTOMATED_REGISTER = lambda rsc: [
+ "crm_resource",
+ "--resource",
+ rsc,
+ "--get-parameter=AUTOMATED_REGISTER",
]
+PRIORITY_FENCING_DELAY = [
+ "crm_attribute",
+ "--type",
+ "crm_config",
+ "--name",
+ "priority-fencing-delay",
+ "--quiet",
+]
FREEZE_FILESYSTEM = lambda file_system, mount_point: [
"mount",
diff --git a/src/modules/get_cluster_status_db.py b/src/modules/get_cluster_status_db.py
index bc2da107..f44adc40 100644
--- a/src/modules/get_cluster_status_db.py
+++ b/src/modules/get_cluster_status_db.py
@@ -14,10 +14,10 @@
try:
from ansible.module_utils.get_cluster_status import BaseClusterStatusChecker
from ansible.module_utils.enums import OperatingSystemFamily, HanaSRProvider
- from ansible.module_utils.commands import AUTOMATED_REGISTER
+ from ansible.module_utils.commands import AUTOMATED_REGISTER, PRIORITY_FENCING_DELAY
except ImportError:
from src.module_utils.get_cluster_status import BaseClusterStatusChecker
- from src.module_utils.commands import AUTOMATED_REGISTER
+ from src.module_utils.commands import AUTOMATED_REGISTER, PRIORITY_FENCING_DELAY
from src.module_utils.enums import OperatingSystemFamily, HanaSRProvider
@@ -146,13 +146,15 @@ def __init__(
db_instance_number: str,
saphanasr_provider: HanaSRProvider,
ansible_os_family: OperatingSystemFamily,
- hana_resource_name: str = "",
+ hana_clone_resource_name: str = "",
+ hana_primitive_resource_name: str = "",
):
super().__init__(ansible_os_family)
self.database_sid = database_sid
self.saphanasr_provider = saphanasr_provider
self.db_instance_number = db_instance_number
- self.hana_resource_name = hana_resource_name
+ self.hana_clone_resource_name = hana_clone_resource_name
+ self.hana_primitive_resource_name = hana_primitive_resource_name
self.result.update(
{
"primary_node": "",
@@ -161,18 +163,28 @@ def __init__(
"replication_mode": "",
"primary_site_name": "",
"AUTOMATED_REGISTER": "false",
+ "PRIORITY_FENCING_DELAY": "",
}
)
- def _get_automation_register(self) -> None:
+ def _get_cluster_parameters(self) -> None:
"""
- Retrieves the value of the AUTOMATED_REGISTER attribute.
+ Retrieves the values of the AUTOMATED_REGISTER and PRIORITY_FENCING_DELAY attributes.
"""
- try:
- cmd_output = self.execute_command_subprocess(AUTOMATED_REGISTER).strip()
- self.result["AUTOMATED_REGISTER"] = ET.fromstring(cmd_output).get("value")
- except Exception:
- self.result["AUTOMATED_REGISTER"] = "unknown"
+ param_commands = {
+ "AUTOMATED_REGISTER": (
+ AUTOMATED_REGISTER(self.hana_primitive_resource_name)
+ if self.hana_primitive_resource_name
+ else AUTOMATED_REGISTER(self.hana_clone_resource_name)
+ ),
+ "PRIORITY_FENCING_DELAY": PRIORITY_FENCING_DELAY,
+ }
+
+ for param_name, command in param_commands.items():
+ try:
+ self.result[param_name] = self.execute_command_subprocess(command).strip()
+ except Exception:
+ self.result[param_name] = "unknown"
def _process_node_attributes(self, cluster_status_xml: ET.Element) -> Dict[str, Any]:
"""
@@ -209,8 +221,8 @@ def _process_node_attributes(self, cluster_status_xml: ET.Element) -> Dict[str,
HanaSRProvider.ANGI: {
"clone_attr": f"hana_{self.database_sid}_clone_state",
"sync_attr": (
- f"master-{self.hana_resource_name}"
- if self.hana_resource_name
+ f"master-{self.hana_clone_resource_name}"
+ if self.hana_clone_resource_name
else f"master-rsc_SAPHanaCon_{self.database_sid.upper()}"
+ f"_HDB{self.db_instance_number}"
),
@@ -281,7 +293,7 @@ def run(self) -> Dict[str, str]:
:rtype: Dict[str, str]
"""
result = super().run()
- self._get_automation_register()
+ self._get_cluster_parameters()
return result
@@ -294,7 +306,8 @@ def run_module() -> None:
database_sid=dict(type="str", required=True),
saphanasr_provider=dict(type="str", required=True),
db_instance_number=dict(type="str", required=True),
- hana_resource_name=dict(type="str", required=False),
+ hana_clone_resource_name=dict(type="str", required=False),
+ hana_primitive_resource_name=dict(type="str", required=False),
filter=dict(type="str", required=False, default="os_family"),
)
@@ -307,7 +320,8 @@ def run_module() -> None:
str(ansible_facts(module).get("os_family", "UNKNOWN")).upper()
),
db_instance_number=module.params["db_instance_number"],
- hana_resource_name=module.params.get("hana_resource_name", ""),
+ hana_clone_resource_name=module.params.get("hana_clone_resource_name", ""),
+ hana_primitive_resource_name=module.params.get("hana_primitive_resource_name", ""),
)
checker.run()
diff --git a/src/modules/get_pcmk_properties_scs.py b/src/modules/get_pcmk_properties_scs.py
index 59680f1c..c4d61349 100644
--- a/src/modules/get_pcmk_properties_scs.py
+++ b/src/modules/get_pcmk_properties_scs.py
@@ -246,6 +246,82 @@ def _validate_resource_constants(self):
return parameters
+ def _resolve_provider_values(self, expected_value: dict) -> list:
+ """
+ Resolve provider-specific values from a configuration dictionary.
+
+ This method handles the complex logic of extracting appropriate values
+ based on the NFS provider configuration. It supports both provider-specific
+ configurations and fallback to all available providers.
+
+ :param expected_value: Dictionary containing provider configurations
+ :type expected_value: dict
+ :return: List of resolved values for validation
+ :rtype: list
+ :raises TypeError: If expected_value is not a dictionary
+ """
+ if not isinstance(expected_value, dict):
+ raise TypeError("Expected value must be a dictionary for provider resolution")
+
+ provider_values = []
+ if self.nfs_provider and self.nfs_provider in expected_value:
+ provider_config = expected_value[self.nfs_provider]
+ provider_values = self._extract_values_from_config(provider_config)
+ else:
+ for _, provider_config in expected_value.items():
+ extracted_values = self._extract_values_from_config(provider_config)
+ if isinstance(extracted_values, list):
+ provider_values.extend(extracted_values)
+ else:
+ provider_values.append(extracted_values)
+
+ return provider_values if isinstance(provider_values, list) else [provider_values]
+
+ def _extract_values_from_config(self, provider_config):
+ """
+ Extract values from a provider configuration structure.
+
+ Handles various configuration formats:
+ - {"value": [list]} or {"value": "single"}
+ - [list] directly
+ - "single" value directly
+
+ :param provider_config: Configuration object to extract values from
+ :type provider_config: dict or list or str
+ :return: Extracted value(s)
+ :rtype: list or str
+ """
+ if isinstance(provider_config, dict) and "value" in provider_config:
+ return provider_config["value"]
+ elif isinstance(provider_config, (list, str)):
+ return provider_config
+ else:
+ return provider_config
+
+ def _compare_value_with_expectations(self, value: str, expected_values) -> str:
+ """
+ Compare a value against expected values and return test status.
+
+ :param value: The actual value to compare
+ :type value: str
+ :param expected_values: Expected value(s) for comparison
+ :type expected_values: str or list
+ :return: Test status (SUCCESS or ERROR)
+ :rtype: str
+ """
+ if isinstance(expected_values, list):
+ return (
+ TestStatus.SUCCESS.value
+ if str(value) in [str(v) for v in expected_values]
+ else TestStatus.ERROR.value
+ )
+ else:
+ return (
+ TestStatus.SUCCESS.value
+ if str(value) == str(expected_values)
+ else TestStatus.ERROR.value
+ )
+
def _determine_parameter_status(self, value, expected_value):
"""
Determine the status of a parameter with SCS-specific logic for NFS provider.
@@ -257,60 +333,31 @@ def _determine_parameter_status(self, value, expected_value):
:return: The status of the parameter.
:rtype: str
"""
+ # Handle tuple format (value, required)
if isinstance(expected_value, tuple):
expected_val, required = expected_value
if not required and (expected_val is None or value == ""):
return TestStatus.INFO.value
expected_value = expected_val
+ # Handle empty/null cases
if expected_value is None or value == "":
return TestStatus.INFO.value
+
+ # Handle simple string/list cases
elif isinstance(expected_value, (str, list)):
- if isinstance(expected_value, list):
- return (
- TestStatus.SUCCESS.value
- if str(value) in expected_value
- else TestStatus.ERROR.value
- )
- else:
- return (
- TestStatus.SUCCESS.value
- if str(value) == str(expected_value)
- else TestStatus.ERROR.value
- )
+ return self._compare_value_with_expectations(value, expected_value)
+
+ # Handle complex provider-based dictionary cases
elif isinstance(expected_value, dict):
- provider_values = []
- if self.nfs_provider and self.nfs_provider in expected_value:
- provider_config = expected_value[self.nfs_provider]
- if isinstance(provider_config, dict) and "value" in provider_config:
- provider_values = provider_config["value"]
- else:
- provider_values = provider_config
- else:
- # If provider is unknown/not set, collect all provider values
- for provider_key, provider_config in expected_value.items():
- if isinstance(provider_config, dict) and "value" in provider_config:
- if isinstance(provider_config["value"], list):
- provider_values.extend(provider_config["value"])
- else:
- provider_values.append(provider_config["value"])
- elif isinstance(provider_config, list):
- provider_values.extend(provider_config)
- else:
- provider_values.append(provider_config)
-
- if isinstance(provider_values, list):
- return (
- TestStatus.SUCCESS.value
- if str(value) in provider_values
- else TestStatus.ERROR.value
- )
- else:
- return (
- TestStatus.SUCCESS.value
- if str(value) == str(provider_values)
- else TestStatus.ERROR.value
- )
+ try:
+ provider_values = self._resolve_provider_values(expected_value)
+ return self._compare_value_with_expectations(value, provider_values)
+ except (TypeError, KeyError) as ex:
+ self.result["message"] += f"Error resolving provider values: {str(ex)} "
+ return TestStatus.ERROR.value
+
+ # Handle unexpected types
else:
return TestStatus.ERROR.value
diff --git a/src/roles/ha_db_hana/tasks/block-network.yml b/src/roles/ha_db_hana/tasks/block-network.yml
index 4ed49b67..6f8b88d3 100644
--- a/src/roles/ha_db_hana/tasks/block-network.yml
+++ b/src/roles/ha_db_hana/tasks/block-network.yml
@@ -23,6 +23,9 @@
- node_tier == "hana"
- pre_validations_status == "PASSED"
- cluster_status_pre.stonith_action == "reboot"
+ - cluster_status_pre.PRIORITY_FENCING_DELAY is defined
+ - cluster_status_pre.PRIORITY_FENCING_DELAY != ""
+ - cluster_status_pre.PRIORITY_FENCING_DELAY != "unknown"
block:
- name: "Test Execution: Block Network Communication"
when: ansible_hostname == cluster_status_pre.primary_node
@@ -93,7 +96,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution_primary
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -120,7 +124,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post_primary
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -140,7 +145,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution_secondary
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -158,7 +164,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post_secondary
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -227,6 +234,7 @@
test_case_details_from_test_case: {
"Pre Validations: Remove any location_constraints": "{{ location_constraints_results }}",
"Pre Validations: Validate HANA DB cluster status": "{{ cluster_status_pre }}",
+ "Pre Validations: priority-fencing-delay": "{{ cluster_status_pre.PRIORITY_FENCING_DELAY | default('Not Configured') }}",
"Pre Validations: CleanUp any failed resource": "{{ cleanup_failed_resource_pre }}",
"Cluster Status": "{{ cluster_status_pre }}",
}
diff --git a/src/roles/ha_db_hana/tasks/files/constants.yaml b/src/roles/ha_db_hana/tasks/files/constants.yaml
index f2865b5d..c3cd5660 100644
--- a/src/roles/ha_db_hana/tasks/files/constants.yaml
+++ b/src/roles/ha_db_hana/tasks/files/constants.yaml
@@ -731,7 +731,6 @@ OS_PARAMETERS:
# === Global INI ===
# Reading the global.ini file to get the provider and path for the SAPHanaSR resource agent
GLOBAL_INI:
- GLOBAL_INI:
SUSE:
SAPHanaSR:
provider:
diff --git a/src/roles/ha_db_hana/tasks/fs-freeze.yml b/src/roles/ha_db_hana/tasks/fs-freeze.yml
index 80eae09e..5779675d 100644
--- a/src/roles/ha_db_hana/tasks/fs-freeze.yml
+++ b/src/roles/ha_db_hana/tasks/fs-freeze.yml
@@ -59,7 +59,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -76,7 +77,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/primary-crash-index.yml b/src/roles/ha_db_hana/tasks/primary-crash-index.yml
index ac98e9d5..c9c3859f 100644
--- a/src/roles/ha_db_hana/tasks/primary-crash-index.yml
+++ b/src/roles/ha_db_hana/tasks/primary-crash-index.yml
@@ -57,7 +57,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -72,7 +73,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -122,7 +124,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/primary-echo-b.yml b/src/roles/ha_db_hana/tasks/primary-echo-b.yml
index 3a0634b0..cf85474f 100644
--- a/src/roles/ha_db_hana/tasks/primary-echo-b.yml
+++ b/src/roles/ha_db_hana/tasks/primary-echo-b.yml
@@ -49,7 +49,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
register: cluster_status_test_execution
@@ -64,7 +65,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
register: cluster_status_test_execution
@@ -110,7 +112,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/primary-node-crash.yml b/src/roles/ha_db_hana/tasks/primary-node-crash.yml
index 1a236183..3223369d 100644
--- a/src/roles/ha_db_hana/tasks/primary-node-crash.yml
+++ b/src/roles/ha_db_hana/tasks/primary-node-crash.yml
@@ -45,7 +45,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -89,7 +90,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/primary-node-kill.yml b/src/roles/ha_db_hana/tasks/primary-node-kill.yml
index 368dc1d1..fb462754 100644
--- a/src/roles/ha_db_hana/tasks/primary-node-kill.yml
+++ b/src/roles/ha_db_hana/tasks/primary-node-kill.yml
@@ -46,7 +46,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -63,7 +64,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -108,7 +110,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/resource-migration.yml b/src/roles/ha_db_hana/tasks/resource-migration.yml
index 0a1c380f..9f44334d 100644
--- a/src/roles/ha_db_hana/tasks/resource-migration.yml
+++ b/src/roles/ha_db_hana/tasks/resource-migration.yml
@@ -44,12 +44,12 @@
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
- - name: "Test Execution: Set fact the hana_resource_name"
+ - name: "Test Execution: Set fact the hana_clone_resource_name"
ansible.builtin.set_fact:
- hana_resource_name: "{{ hana_resource_id.stdout }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_id.stdout }}"
- name: "Test Execution: Get HANA resource id"
when: saphanasr_provider | default('SAPHanaSR') == "SAPHanaSR"
@@ -57,38 +57,38 @@
- name: "Try master resource ID"
ansible.builtin.shell: >-
set -o pipefail && {{ commands
- | selectattr('name','equalto','get_hana_resource_id')
+ | selectattr('name','equalto','get_hana_clone_resource_id')
| map(attribute=(ansible_os_family|upper))
| first
}}
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
rescue:
- name: "Try clone resource ID"
ansible.builtin.shell: >-
set -o pipefail && {{ commands
- | selectattr('name','equalto','get_hana_resource_id')
+ | selectattr('name','equalto','get_hana_clone_resource_id')
| map(attribute='REDHAT')
| first
}}
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
ignore_errors: true
always:
- name: "Test Execution: Set the resource name"
when:
- - hana_resource_id.rc == 0
- - hana_resource_id.stdout is defined
- - hana_resource_id.stdout | type_debug != 'NoneType'
- - hana_resource_id.stdout | trim | length > 1
+ - hana_clone_resource_id.rc == 0
+ - hana_clone_resource_id.stdout is defined
+ - hana_clone_resource_id.stdout | type_debug != 'NoneType'
+ - hana_clone_resource_id.stdout | trim | length > 1
ansible.builtin.set_fact:
- hana_resource_name: "{{ hana_resource_id.stdout }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_id.stdout }}"
- name: "Test Execution: Move the resource to the targeted node"
ansible.builtin.command: "{{ commands | selectattr(
@@ -104,7 +104,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -160,7 +161,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution_1
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/sbd-fencing.yml b/src/roles/ha_db_hana/tasks/sbd-fencing.yml
index a08e58c9..670def4d 100644
--- a/src/roles/ha_db_hana/tasks/sbd-fencing.yml
+++ b/src/roles/ha_db_hana/tasks/sbd-fencing.yml
@@ -60,7 +60,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
register: cluster_status_test_execution
@@ -77,7 +78,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/secondary-crash-index.yml b/src/roles/ha_db_hana/tasks/secondary-crash-index.yml
index 59515bea..bb97bfc4 100644
--- a/src/roles/ha_db_hana/tasks/secondary-crash-index.yml
+++ b/src/roles/ha_db_hana/tasks/secondary-crash-index.yml
@@ -57,7 +57,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -71,7 +72,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/secondary-echo-b.yml b/src/roles/ha_db_hana/tasks/secondary-echo-b.yml
index c5ef6da1..cf731e52 100644
--- a/src/roles/ha_db_hana/tasks/secondary-echo-b.yml
+++ b/src/roles/ha_db_hana/tasks/secondary-echo-b.yml
@@ -53,7 +53,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
register: cluster_status_test_execution
@@ -67,7 +68,8 @@
operation_step: "post_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/ha_db_hana/tasks/secondary-node-kill.yml b/src/roles/ha_db_hana/tasks/secondary-node-kill.yml
index 3c3e510c..94ec9c68 100644
--- a/src/roles/ha_db_hana/tasks/secondary-node-kill.yml
+++ b/src/roles/ha_db_hana/tasks/secondary-node-kill.yml
@@ -51,7 +51,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_test_execution
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
@@ -65,7 +66,8 @@
operation_step: "test_execution"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_post
retries: "{{ default_retries }}"
delay: "{{ default_delay }}"
diff --git a/src/roles/misc/tasks/cluster-report.yml b/src/roles/misc/tasks/cluster-report.yml
index 4467980c..ad9cd805 100644
--- a/src/roles/misc/tasks/cluster-report.yml
+++ b/src/roles/misc/tasks/cluster-report.yml
@@ -12,7 +12,8 @@
operation_step: "cluster_report_collection"
database_sid: "{{ db_sid | lower | default('') }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status
failed_when: cluster_status.primary_node == ""
diff --git a/src/roles/misc/tasks/pre-validations-db.yml b/src/roles/misc/tasks/pre-validations-db.yml
index da6f1435..5c572943 100644
--- a/src/roles/misc/tasks/pre-validations-db.yml
+++ b/src/roles/misc/tasks/pre-validations-db.yml
@@ -32,54 +32,74 @@
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
- - name: "Pre validation: Set fact the hana_resource_name"
+ - name: "Pre validation: Set fact the hana_clone_resource_name"
ansible.builtin.set_fact:
- hana_resource_name: "{{ hana_resource_id.stdout }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_id.stdout }}"
- - name: "Pre validation: Get HANA resource id"
+ - name: "Pre validation: Get HANA Clone resource id"
when: saphanasr_provider | default('SAPHanaSR') == "SAPHanaSR"
block:
- name: "Try master resource ID"
become: true
ansible.builtin.shell: >-
set -o pipefail && {{ commands
- | selectattr('name','equalto','get_hana_resource_id')
+ | selectattr('name','equalto','get_hana_clone_resource_id')
| map(attribute=(ansible_os_family|upper))
| first
}}
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
rescue:
- name: "Try clone resource ID"
become: true
ansible.builtin.shell: >-
set -o pipefail && {{ commands
- | selectattr('name','equalto','get_hana_resource_id')
+ | selectattr('name','equalto','get_hana_clone_resource_id')
| map(attribute='REDHAT')
| first
}}
args:
executable: /bin/bash
changed_when: false
- register: hana_resource_id
- failed_when: hana_resource_id.rc != 0
+ register: hana_clone_resource_id
+ failed_when: hana_clone_resource_id.rc != 0
ignore_errors: true
always:
- name: "Test Execution: Set the resource name"
when:
- - hana_resource_id.rc == 0
- - hana_resource_id.stdout is defined
- - hana_resource_id.stdout | type_debug != 'NoneType'
- - hana_resource_id.stdout | trim | length > 1
+ - hana_clone_resource_id.rc == 0
+ - hana_clone_resource_id.stdout is defined
+ - hana_clone_resource_id.stdout | type_debug != 'NoneType'
+ - hana_clone_resource_id.stdout | trim | length > 1
ansible.builtin.set_fact:
- hana_resource_name: "{{ hana_resource_id.stdout }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_id.stdout }}"
+ - name: "Pre validation: Get HANA Primitive resource id"
+ when: saphanasr_provider | default('SAPHanaSR') == "SAPHanaSR"
+ block:
+ - name: "Pre validation: Get HANA Primitive resource id"
+ become: true
+ ansible.builtin.shell: >-
+ set -o pipefail && {{ commands
+ | selectattr('name','equalto','get_hana_primitive_resource_id')
+ | map(attribute=(ansible_os_family|upper))
+ | first
+ }}
+ args:
+ executable: /bin/bash
+ changed_when: false
+ register: hana_primitive_resource_id
+ failed_when: hana_primitive_resource_id.rc != 0
+
+ - name: "Pre validation: Set fact the hana_primitive_resource_name"
+ ansible.builtin.set_fact:
+ hana_primitive_resource_name: "{{ hana_primitive_resource_id.stdout }}"
- name: "Pre Validation: Validate HANA DB cluster status on primary node"
become: true
@@ -88,7 +108,8 @@
operation_step: "pre_failover"
database_sid: "{{ db_sid | lower }}"
saphanasr_provider: "{{ saphanasr_provider | default('SAPHanaSR') }}"
- hana_resource_name: "{{ hana_resource_name | default('') }}"
+ hana_clone_resource_name: "{{ hana_clone_resource_name | default('') }}"
+ hana_primitive_resource_name: "{{ hana_primitive_resource_name | default('') }}"
register: cluster_status_pre
until: cluster_status_pre.primary_node != "" or
cluster_status_pre.secondary_node != ""
diff --git a/src/vars/input-api.yaml b/src/vars/input-api.yaml
index 58e7a73c..0b535dfb 100644
--- a/src/vars/input-api.yaml
+++ b/src/vars/input-api.yaml
@@ -267,7 +267,11 @@ ascs_stonith_timeout: 120
# Commands for HANA DB HA Test Cases based on OS family
commands:
- - name: get_hana_resource_id
+ - name: get_hana_primitive_resource_id
+ SUSE: "cibadmin --query --xpath \"//primitive[@type='SAPHana']\" --node-path | grep -oP \"master\\[@id='\\K[^']+\""
+ REDHAT: "cibadmin --query --xpath \"//primitive[@type='SAPHana']\" --node-path | grep -oP \"primitive\\[@id='\\K[^']+\""
+
+ - name: get_hana_clone_resource_id
SUSE: "cibadmin --query --xpath \"//primitive[@type='SAPHana']\" --node-path | grep -oP \"master\\[@id='\\K[^']+\""
REDHAT: "cibadmin --query --xpath \"//primitive[@type='SAPHana']\" --node-path | grep -oP \"clone\\[@id='\\K[^']+\""
diff --git a/tests/modules/get_cluster_status_db_test.py b/tests/modules/get_cluster_status_db_test.py
index b7c413fc..b5448506 100644
--- a/tests/modules/get_cluster_status_db_test.py
+++ b/tests/modules/get_cluster_status_db_test.py
@@ -32,7 +32,8 @@ def hana_checker_classic(self):
ansible_os_family=OperatingSystemFamily.REDHAT,
saphanasr_provider=HanaSRProvider.SAPHANASR,
db_instance_number="00",
- hana_resource_name="rsc_SAPHanaCon_TEST_HDB00",
+ hana_clone_resource_name="rsc_SAPHanaCon_TEST_HDB00",
+ hana_primitive_resource_name="rsc_SAPHanaPrm_TEST_HDB00",
)
@pytest.fixture
@@ -48,12 +49,13 @@ def hana_checker_angi(self):
ansible_os_family=OperatingSystemFamily.SUSE,
saphanasr_provider=HanaSRProvider.ANGI,
db_instance_number="00",
- hana_resource_name="rsc_SAPHanaCon_TEST_HDB00",
+ hana_clone_resource_name="rsc_SAPHanaCon_TEST_HDB00",
+ hana_primitive_resource_name="rsc_SAPHanaCon_TEST_HDB00",
)
- def test_get_automation_register(self, mocker, hana_checker_classic):
+ def test_get_cluster_parameters(self, mocker, hana_checker_classic):
"""
- Test the _get_automation_register method.
+ Test the _get_cluster_parameters method.
:param mocker: Mocking library for Python.
:type mocker: _mocker.MagicMock
@@ -63,17 +65,16 @@ def test_get_automation_register(self, mocker, hana_checker_classic):
mocker.patch.object(
hana_checker_classic,
"execute_command_subprocess",
- return_value='',
+ return_value="true",
)
- hana_checker_classic._get_automation_register()
+ hana_checker_classic._get_cluster_parameters()
assert hana_checker_classic.result["AUTOMATED_REGISTER"] == "true"
- def test_get_automation_register_exception(self, mocker, hana_checker_classic):
+ def test_get_cluster_parameters_exception(self, mocker, hana_checker_classic):
"""
- Test the _get_automation_register method when an exception occurs.
+ Test the _get_cluster_parameters method when an exception occurs.
:param mocker: Mocking library for Python.
:type mocker: _mocker.MagicMock
@@ -84,7 +85,7 @@ def test_get_automation_register_exception(self, mocker, hana_checker_classic):
hana_checker_classic, "execute_command_subprocess", side_effect=Exception("Test error")
)
- hana_checker_classic._get_automation_register()
+ hana_checker_classic._get_cluster_parameters()
assert hana_checker_classic.result["AUTOMATED_REGISTER"] == "unknown"
@@ -257,7 +258,7 @@ def test_run(self, mocker, hana_checker_classic):
return_value={"status": "PASSED"},
)
- mock_get_automation = mocker.patch.object(hana_checker_classic, "_get_automation_register")
+ mock_get_automation = mocker.patch.object(hana_checker_classic, "_get_cluster_parameters")
result = hana_checker_classic.run()
diff --git a/tests/modules/get_pcmk_properties_scs_test.py b/tests/modules/get_pcmk_properties_scs_test.py
index 51bac43e..663605d4 100644
--- a/tests/modules/get_pcmk_properties_scs_test.py
+++ b/tests/modules/get_pcmk_properties_scs_test.py
@@ -369,7 +369,6 @@ def test_determine_parameter_status_with_list_expected_value(self, validator):
status = validator._determine_parameter_status(
"10.0.1.101", (["10.0.1.100", "10.0.1.101"], False)
)
- print(f"Actual status: {status}, Expected: {TestStatus.SUCCESS.value}")
assert status == TestStatus.SUCCESS.value
def test_determine_parameter_status_info_cases(self, validator):
diff --git a/tests/roles/ha_db_hana/block_network_test.py b/tests/roles/ha_db_hana/block_network_test.py
index 8de1d413..012b218b 100644
--- a/tests/roles/ha_db_hana/block_network_test.py
+++ b/tests/roles/ha_db_hana/block_network_test.py
@@ -34,7 +34,11 @@ def test_environment(self, ansible_inventory):
commands = [
{
- "name": "get_hana_resource_id",
+ "name": "get_hana_clone_resource_id",
+ "SUSE": "cibadmin --query --scope resources",
+ },
+ {
+ "name": "get_hana_primitive_resource_id",
"SUSE": "cibadmin --query --scope resources",
},
{
diff --git a/tests/roles/ha_db_hana/primary_node_ops_test.py b/tests/roles/ha_db_hana/primary_node_ops_test.py
index a265c1d0..98ca64b6 100644
--- a/tests/roles/ha_db_hana/primary_node_ops_test.py
+++ b/tests/roles/ha_db_hana/primary_node_ops_test.py
@@ -100,7 +100,11 @@ def test_environment(self, ansible_inventory, task_type):
commands = [
{
- "name": "get_hana_resource_id",
+ "name": "get_hana_clone_resource_id",
+ "SUSE": "cibadmin --query --scope resources",
+ },
+ {
+ "name": "get_hana_primitive_resource_id",
"SUSE": "cibadmin --query --scope resources",
},
{
diff --git a/tests/roles/ha_db_hana/resource_migration_test.py b/tests/roles/ha_db_hana/resource_migration_test.py
index a9a34493..4ee4fe6b 100644
--- a/tests/roles/ha_db_hana/resource_migration_test.py
+++ b/tests/roles/ha_db_hana/resource_migration_test.py
@@ -53,11 +53,15 @@ def test_environment(self, ansible_inventory):
commands = [
{
"name": "resource_migration_cmd",
- "SUSE": "crm resource move {{ hana_resource_name | default('msl_SAPHana_' ~ "
+ "SUSE": "crm resource move {{ hana_clone_resource_name | default('msl_SAPHana_' ~ "
"(db_sid | upper) ~ '_HDB' ~ db_instance_number) }} db02 force",
},
{
- "name": "get_hana_resource_id",
+ "name": "get_hana_clone_resource_id",
+ "SUSE": "cibadmin --query --scope resources",
+ },
+ {
+ "name": "get_hana_primitive_resource_id",
"SUSE": "cibadmin --query --scope resources",
},
{
diff --git a/tests/roles/ha_db_hana/secondary_node_ops_test.py b/tests/roles/ha_db_hana/secondary_node_ops_test.py
index c24d1558..e0964dc2 100644
--- a/tests/roles/ha_db_hana/secondary_node_ops_test.py
+++ b/tests/roles/ha_db_hana/secondary_node_ops_test.py
@@ -71,7 +71,11 @@ def test_environment(self, ansible_inventory, task_type):
commands = [
{
- "name": "get_hana_resource_id",
+ "name": "get_hana_clone_resource_id",
+ "SUSE": "cibadmin --query --scope resources",
+ },
+ {
+ "name": "get_hana_primitive_resource_id",
"SUSE": "cibadmin --query --scope resources",
},
{
diff --git a/tests/roles/mock_data/get_cluster_status_db.txt b/tests/roles/mock_data/get_cluster_status_db.txt
index 24179b7f..43497538 100644
--- a/tests/roles/mock_data/get_cluster_status_db.txt
+++ b/tests/roles/mock_data/get_cluster_status_db.txt
@@ -11,7 +11,8 @@ def main():
database_sid=dict(type="str", required=True),
saphanasr_provider=dict(type="str", required=True),
db_instance_number=dict(type="str", required=True),
- hana_resource_name=dict(type="str", default="")
+ hana_clone_resource_name=dict(type="str", default=""),
+ hana_primitive_resource_name=dict(type="str", default="")
)
)
@@ -40,7 +41,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
elif counter == 1 or counter == 2:
result = {
@@ -53,7 +55,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
else:
result = {
@@ -66,7 +69,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
module.exit_json(**result)
diff --git a/tests/roles/mock_data/secondary_get_cluster_status_db.txt b/tests/roles/mock_data/secondary_get_cluster_status_db.txt
index 131c717f..a64302cb 100644
--- a/tests/roles/mock_data/secondary_get_cluster_status_db.txt
+++ b/tests/roles/mock_data/secondary_get_cluster_status_db.txt
@@ -11,7 +11,8 @@ def main():
database_sid=dict(type="str", required=True),
saphanasr_provider=dict(type="str", required=True),
db_instance_number=dict(type="str", required=True),
- hana_resource_name=dict(type="str", default="")
+ hana_clone_resource_name=dict(type="str", default=""),
+ hana_primitive_resource_name=dict(type="str", default="")
)
)
@@ -40,7 +41,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
elif counter == 1 or counter == 2:
result = {
@@ -53,7 +55,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
else:
result = {
@@ -66,7 +69,8 @@ def main():
"replication_mode": "sync",
"primary_site_name": "db01",
"operation_mode": "active",
- "stonith_action": "reboot"
+ "stonith_action": "reboot",
+ "PRIORITY_FENCING_DELAY": "15s"
}
module.exit_json(**result)