From 7e6a2dadbfa9d610712caa44b51bf149847363b2 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:43:28 -0300 Subject: [PATCH 01/72] python linter: clean xlators/features/utime/src/utime-gen-fops-h.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # xlators/features/utime/src/utime-gen-fops-h.py | 1 + # 1 file changed, 1 insertion(+) # xlators/features/utime/src/utime-gen-fops-h.py | 1 + # 1 file changed, 1 insertion(+) --- xlators/features/utime/src/utime-gen-fops-h.py | 1 + 1 file changed, 1 insertion(+) diff --git a/xlators/features/utime/src/utime-gen-fops-h.py b/xlators/features/utime/src/utime-gen-fops-h.py index e96274c229a..1aed6f142d8 100755 --- a/xlators/features/utime/src/utime-gen-fops-h.py +++ b/xlators/features/utime/src/utime-gen-fops-h.py @@ -20,6 +20,7 @@ 'ftruncate', 'create', 'open', 'removexattr', 'fremovexattr', 'readv', 'writev', 'setattr', 'fsetattr', 'copy_file_range'] + def gen_defaults(): for name, value in ops.items(): if name in utime_ops: From 0551736f8c9a78093e0ae3aee60a2b20d22cbe31 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:43:50 -0300 Subject: [PATCH 02/72] python linter: clean xlators/features/utime/src/utime-gen-fops-c.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # xlators/features/utime/src/utime-gen-fops-c.py | 2 ++ # 1 file changed, 2 insertions(+) # xlators/features/utime/src/utime-gen-fops-c.py | 2 ++ # 1 file changed, 2 insertions(+) --- xlators/features/utime/src/utime-gen-fops-c.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xlators/features/utime/src/utime-gen-fops-c.py b/xlators/features/utime/src/utime-gen-fops-c.py index 9fb3e1b8b1a..363ea7f9286 100755 --- a/xlators/features/utime/src/utime-gen-fops-c.py +++ b/xlators/features/utime/src/utime-gen-fops-c.py @@ -120,6 +120,7 @@ utime_setattr_ops = ['setattr', 'fsetattr'] utime_copy_file_range_ops = ['copy_file_range'] + def gen_defaults(): for name in ops: if name in utime_ops: @@ -138,6 +139,7 @@ def gen_defaults(): print(generate(FOPS_CBK_COMMON_TEMPLATE, name, cbk_subs)) print(generate(FOPS_COPY_FILE_RANGE_TEMPLATE, name, fop_subs)) + for l in open(sys.argv[1], 'r').readlines(): if l.find('#pragma generate') != -1: print("/* BEGIN GENERATED CODE - DO NOT MODIFY */") From 8b34ff1aad5ab12ff02eabcefa3a78962e52f00f Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:44:12 -0300 Subject: [PATCH 03/72] python linter: clean xlators/features/cloudsync/src/cloudsync-fops-h.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # xlators/features/cloudsync/src/cloudsync-fops-h.py | 1 + # 1 file changed, 1 insertion(+) # xlators/features/cloudsync/src/cloudsync-fops-h.py | 1 + # 1 file changed, 1 insertion(+) --- xlators/features/cloudsync/src/cloudsync-fops-h.py | 1 + 1 file changed, 1 insertion(+) diff --git a/xlators/features/cloudsync/src/cloudsync-fops-h.py b/xlators/features/cloudsync/src/cloudsync-fops-h.py index faa2de651a7..1ae4e4580d4 100755 --- a/xlators/features/cloudsync/src/cloudsync-fops-h.py +++ b/xlators/features/cloudsync/src/cloudsync-fops-h.py @@ -15,6 +15,7 @@ @LONG_ARGS@); """ + def gen_defaults(): for name, value in ops.items(): if name == 'getspec': From 5ee8ef11f71d87ae4a16cf0edf5452ebbf06cd2d Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:44:26 -0300 Subject: [PATCH 04/72] python linter: clean xlators/features/cloudsync/src/cloudsync-fops-c.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # xlators/features/cloudsync/src/cloudsync-fops-c.py | 2 ++ # 1 file changed, 2 insertions(+) # xlators/features/cloudsync/src/cloudsync-fops-c.py | 2 ++ # 1 file changed, 2 insertions(+) --- xlators/features/cloudsync/src/cloudsync-fops-c.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/xlators/features/cloudsync/src/cloudsync-fops-c.py b/xlators/features/cloudsync/src/cloudsync-fops-c.py index c27df97ae58..c4a16d9354e 100755 --- a/xlators/features/cloudsync/src/cloudsync-fops-c.py +++ b/xlators/features/cloudsync/src/cloudsync-fops-c.py @@ -305,6 +305,7 @@ special_fops = ['statfs', 'setxattr', 'unlink', 'getxattr', 'truncate', 'fstat', 'readv', 'readdirp'] + def gen_defaults(): for name in ops: if name in fd_data_modify_op_fop_template: @@ -315,6 +316,7 @@ def gen_defaults(): print(generate(LOC_STAT_OP_FOP_CBK_TEMPLATE, name, cbk_subs)) print(generate(LOC_STAT_OP_FOP_TEMPLATE, name, fop_subs)) + for l in open(sys.argv[1], 'r').readlines(): if l.find('#pragma generate') != -1: print("/* BEGIN GENERATED CODE - DO NOT MODIFY */") From 63c3d28bc21c60e90abf5996aac21c292fcbd41a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:44:57 -0300 Subject: [PATCH 05/72] python linter: clean xlators/features/changelog/lib/examples/python/libgfchangelog.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # .../features/changelog/lib/examples/python/libgfchangelog.py | 10 ++++++---- # 1 file changed, 6 insertions(+), 4 deletions(-) # .../features/changelog/lib/examples/python/libgfchangelog.py | 10 ++++++---- # 1 file changed, 6 insertions(+), 4 deletions(-) --- .../changelog/lib/examples/python/libgfchangelog.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/xlators/features/changelog/lib/examples/python/libgfchangelog.py b/xlators/features/changelog/lib/examples/python/libgfchangelog.py index 2da9f2d2a8c..e5ddda9ee02 100644 --- a/xlators/features/changelog/lib/examples/python/libgfchangelog.py +++ b/xlators/features/changelog/lib/examples/python/libgfchangelog.py @@ -2,6 +2,7 @@ from ctypes import * from ctypes.util import find_library + class Changes(object): libgfc = CDLL(find_library("gfchangelog"), mode=RTLD_GLOBAL, use_errno=True) @@ -26,9 +27,10 @@ def cl_init(cls): cls.raise_changelog_err() @classmethod - def cl_register(cls, brick, path, log_file, log_level, retries = 0): + def cl_register(cls, brick, path, log_file, log_level, retries=0): ret = cls._get_api('gf_changelog_register')(brick, path, - log_file, log_level, retries) + log_file, log_level, + retries) if ret == -1: cls.raise_oserr() @@ -46,7 +48,7 @@ def cl_startfresh(cls): @classmethod def cl_getchanges(cls): - """ remove hardcoding for path name length """ + """Remove hardcoding for path name length.""" def clsort(f): return f.split('.')[-1] changes = [] @@ -56,7 +58,7 @@ def clsort(f): while True: ret = call(buf, 4096) if ret in (0, -1): - break; + break changes.append(buf.raw[:ret-1]) if ret == -1: cls.raise_oserr() From 26bffdfe5cbb2ca423c9624a66cc2acbccb82939 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:45:16 -0300 Subject: [PATCH 06/72] python linter: clean xlators/features/changelog/lib/examples/python/changes.py This series aim to reduce the python linter build 
logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # xlators/features/changelog/lib/examples/python/changes.py | 4 ++++ # 1 file changed, 4 insertions(+) # xlators/features/changelog/lib/examples/python/changes.py | 4 ++++ # 1 file changed, 4 insertions(+) --- xlators/features/changelog/lib/examples/python/changes.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/xlators/features/changelog/lib/examples/python/changes.py b/xlators/features/changelog/lib/examples/python/changes.py index c410d3b000d..9079309ff39 100755 --- a/xlators/features/changelog/lib/examples/python/changes.py +++ b/xlators/features/changelog/lib/examples/python/changes.py @@ -1,13 +1,16 @@ #!/usr/bin/python3 from __future__ import print_function + import os import sys import time + import libgfchangelog cl = libgfchangelog.Changes() + def get_changes(brick, scratch_dir, log_file, log_level, interval): change_list = [] try: @@ -26,6 +29,7 @@ def get_changes(brick, scratch_dir, log_file, log_level, interval): ex = sys.exc_info()[1] print(ex) + if __name__ == '__main__': if len(sys.argv) != 6: print(("usage: %s " From d6f29edad8a2238b5a76319a3f6b274670598018 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 23:49:43 -0300 Subject: [PATCH 07/72] python linter: clean tools/glusterfind/src/utils.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/utils.py | 4 +--- # 1 file changed, 1 insertion(+), 3 deletions(-) # tools/glusterfind/src/utils.py | 4 +--- # 1 file changed, 1 insertion(+), 3 deletions(-) --- tools/glusterfind/src/utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/tools/glusterfind/src/utils.py b/tools/glusterfind/src/utils.py index 906ebd8f252..9bf7537950b 100644 --- a/tools/glusterfind/src/utils.py +++ b/tools/glusterfind/src/utils.py @@ -152,9 +152,7 @@ def mkdirp(path, exit_on_err=False, logger=None): def fail(msg, code=1, logger=None): - """ - Write error to stderr and exit - """ + """Write error to stderr and exit.""" if logger: logger.error(msg) sys.stderr.write("%s\n" % msg) From ad63350255ce2e1fb1a3a1d017abf6e63c876a41 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:45:31 -0300 Subject: [PATCH 08/72] python linter: clean tools/glusterfind/src/nodeagent.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/nodeagent.py | 12 +++++++----- # 1 file changed, 7 insertions(+), 5 deletions(-) # tools/glusterfind/src/nodeagent.py | 12 +++++++----- # 1 file changed, 7 insertions(+), 5 deletions(-) --- tools/glusterfind/src/nodeagent.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/glusterfind/src/nodeagent.py b/tools/glusterfind/src/nodeagent.py index 679daa6fa76..79684133552 100644 --- a/tools/glusterfind/src/nodeagent.py +++ b/tools/glusterfind/src/nodeagent.py @@ -9,18 +9,20 @@ # later), or the GNU General Public License, version 2 (GPLv2), in all # cases as published by the Free Software Foundation. +import logging +import os import shutil import sys -import os -import logging from argparse import ArgumentParser, RawDescriptionHelpFormatter + try: import urllib.parse as urllib except ImportError: import urllib -from errno import ENOTEMPTY +from errno import ENOTEMPTY from utils import setup_logger, mkdirp, handle_rm_error + import conf logger = logging.getLogger() @@ -52,7 +54,7 @@ def mode_create(args): session_dir = os.path.join(conf.get_opt("session_dir"), args.session) status_file = os.path.join(session_dir, args.volume, - "%s.status" % urllib.quote_plus(args.brick)) + "%s.status" % urllib.quote_plus(args.brick)) mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True, logger=logger) @@ -67,7 +69,7 @@ def mode_create(args): def mode_post(args): session_dir = os.path.join(conf.get_opt("session_dir"), args.session) status_file = os.path.join(session_dir, args.volume, - "%s.status" % urllib.quote_plus(args.brick)) + "%s.status" % urllib.quote_plus(args.brick)) mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True, logger=logger) From 2b50ea01d26cccfaac165e06d2ccf435f3d090cc Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:45:45 -0300 Subject: [PATCH 09/72] python linter: clean 
tools/glusterfind/src/main.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/main.py | 55 +++++++++++++++++++++++-------------------- # 1 file changed, 29 insertions(+), 26 deletions(-) # tools/glusterfind/src/main.py | 55 +++++++++++++++++++++++-------------------- # 1 file changed, 29 insertions(+), 26 deletions(-) --- tools/glusterfind/src/main.py | 55 ++++++++++++++++++----------------- 1 file changed, 29 insertions(+), 26 deletions(-) diff --git a/tools/glusterfind/src/main.py b/tools/glusterfind/src/main.py index 4b5466d0114..6e05548a050 100644 --- a/tools/glusterfind/src/main.py +++ b/tools/glusterfind/src/main.py @@ -9,27 +9,27 @@ # later), or the GNU General Public License, version 2 (GPLv2), in all # cases as published by the Free Software Foundation. 
-import sys -from errno import ENOENT, ENOTEMPTY -import time -from multiprocessing import Process -import os -import xml.etree.cElementTree as etree -from argparse import ArgumentParser, RawDescriptionHelpFormatter, Action -from gfind_py2py3 import gfind_write_row, gfind_write +import codecs import logging +import os +import re import shutil -import tempfile import signal +import sys +import tempfile +import time +import xml.etree.cElementTree as etree +from argparse import Action, ArgumentParser, RawDescriptionHelpFormatter from datetime import datetime -import codecs -import re +from errno import ENOENT, ENOTEMPTY +from multiprocessing import Process -from utils import execute, is_host_local, mkdirp, fail -from utils import setup_logger, human_time, handle_rm_error -from utils import get_changelog_rollover_time, cache_output, create_file import conf from changelogdata import OutputMerger +from gfind_py2py3 import gfind_write, gfind_write_row +from utils import execute, fail, is_host_local, mkdirp +from utils import setup_logger, human_time, handle_rm_error +from utils import get_changelog_rollover_time, cache_output, create_file PROG_DESCRIPTION = """ GlusterFS Incremental API @@ -58,9 +58,7 @@ def get_pem_key_path(session, volume): def node_cmd(host, host_uuid, task, cmd, args, opts): - """ - Runs command via ssh if host is not local - """ + """Run commands via ssh if host is not local.""" try: localdir = is_host_local(host_uuid) @@ -281,7 +279,8 @@ def run_cmd_nodes(task, args, **kwargs): @cache_output def get_nodes(volume): - """ + """Get gluster volume info. + Get the gluster volume info xml output and parse to get the brick details. 
""" @@ -516,11 +515,13 @@ def write_output(outfile, outfilemerger, field_separator): continue if row_2_rep and row_2_rep != "": - gfind_write_row(f, row[0], field_separator, p_rep, row_2_rep) + gfind_write_row(f, row[0], + field_separator, p_rep, row_2_rep) else: gfind_write(f, row[0], field_separator, p_rep) + def validate_volume(volume): cmd = ["gluster", 'volume', 'info', volume, "--xml"] _, data, _ = execute(cmd, @@ -534,6 +535,7 @@ def validate_volume(volume): if statusStr != "Started": fail("Volume %s is not online" % volume) + # The rules for a valid session name. SESSION_NAME_RULES = { 'min_length': 2, @@ -548,17 +550,17 @@ def validate_session_name(session): # Check for minimum length if len(session) < SESSION_NAME_RULES['min_length']: fail('session_name must be at least ' + - str(SESSION_NAME_RULES['min_length']) + ' characters long.') + str(SESSION_NAME_RULES['min_length']) + ' characters long.') # Check for maximum length if len(session) > SESSION_NAME_RULES['max_length']: fail('session_name must not exceed ' + - str(SESSION_NAME_RULES['max_length']) + ' characters length.') + str(SESSION_NAME_RULES['max_length']) + ' characters length.') # Matches strings composed entirely of characters specified within if not re.match(r'^[' + SESSION_NAME_RULES['valid_chars'] + - ']+$', session): + ']+$', session): fail('Session name can only contain these characters: ' + - SESSION_NAME_RULES['valid_chars']) + SESSION_NAME_RULES['valid_chars']) def mode_create(session_dir, args): @@ -760,7 +762,8 @@ def mode_pre(session_dir, args): def mode_post(session_dir, args): - """ + """Post. + If pre session file exists, overwrite session file If pre session file does not exists, return ERROR """ @@ -797,7 +800,8 @@ def mode_delete(session_dir, args): def mode_list(session_dir, args): - """ + """List available sessions. + List available sessions to stdout, if session name is set only list that session. 
""" @@ -883,7 +887,6 @@ def main(): if args.mode not in ["list"]: validate_volume(args.volume) - # "default" is a system defined session name if args.mode in ["create", "post", "pre", "delete"] and \ args.session == "default": From 925640883eb4747edf1e4c697f552906c8767554 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:03:33 -0300 Subject: [PATCH 10/72] python linter: clean tools/glusterfind/src/libgfchangelog.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/libgfchangelog.py | 12 +++++++----- # 1 file changed, 7 insertions(+), 5 deletions(-) # tools/glusterfind/src/libgfchangelog.py | 12 +++++++----- # 1 file changed, 7 insertions(+), 5 deletions(-) --- tools/glusterfind/src/libgfchangelog.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/tools/glusterfind/src/libgfchangelog.py b/tools/glusterfind/src/libgfchangelog.py index 513bb101e93..ce0f17fdcac 100644 --- a/tools/glusterfind/src/libgfchangelog.py +++ b/tools/glusterfind/src/libgfchangelog.py @@ -11,6 +11,7 @@ import os from ctypes import CDLL, RTLD_GLOBAL, get_errno, create_string_buffer, c_ulong, byref from ctypes.util import find_library + from gfind_py2py3 import bytearray_to_str, gf_create_string_buffer from gfind_py2py3 import gfind_history_changelog, gfind_changelog_register from gfind_py2py3 import gfind_history_changelog_done @@ -19,6 +20,7 @@ class ChangelogException(OSError): pass + libgfc = CDLL(find_library("gfchangelog"), mode=RTLD_GLOBAL, use_errno=True) @@ -35,7 +37,7 @@ def cl_init(): def cl_register(brick, path, log_file, log_level, retries=0): - ret = 
gfind_changelog_register(libgfc, brick, path, log_file,log_level, retries) + ret = gfind_changelog_register(libgfc, brick, path, log_file, log_level, retries) if ret == -1: raise_oserr(prefix="gf_changelog_register") @@ -50,9 +52,9 @@ def cl_history_scan(): def cl_history_changelog(changelog_path, start, end, num_parallel): actual_end = c_ulong() - ret = gfind_history_changelog(libgfc,changelog_path, start, end, - num_parallel, - byref(actual_end)) + ret = gfind_history_changelog(libgfc, changelog_path, start, end, + num_parallel, + byref(actual_end)) if ret == -1: raise_oserr(prefix="gf_history_changelog") @@ -66,7 +68,7 @@ def cl_history_startfresh(): def cl_history_getchanges(): - """ remove hardcoding for path name length """ + """Remove hardcoding for path name length.""" def clsort(f): return f.split('.')[-1] From 3b63df623da813131c971b75480f204ed6947ebc Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:05:09 -0300 Subject: [PATCH 11/72] python linter: clean tools/glusterfind/src/gfind_py2py3.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/gfind_py2py3.py | 8 ++++---- # 1 file changed, 4 insertions(+), 4 deletions(-) # tools/glusterfind/src/gfind_py2py3.py | 8 ++++---- # 1 file changed, 4 insertions(+), 4 deletions(-) --- tools/glusterfind/src/gfind_py2py3.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/glusterfind/src/gfind_py2py3.py b/tools/glusterfind/src/gfind_py2py3.py index 87324fbf350..bd245891ec7 100644 --- a/tools/glusterfind/src/gfind_py2py3.py +++ b/tools/glusterfind/src/gfind_py2py3.py @@ -30,12 +30,12 @@ def gf_create_string_buffer(size): def gfind_history_changelog(libgfc, changelog_path, start, end, num_parallel, actual_end): return libgfc.gf_history_changelog(changelog_path.encode(), start, end, num_parallel, - actual_end) + actual_end) def gfind_changelog_register(libgfc, brick, path, log_file, log_level, retries): return libgfc.gf_changelog_register(brick.encode(), path.encode(), log_file.encode(), - log_level, retries) + log_level, retries) def gfind_history_changelog_done(libgfc, clfile): return libgfc.gf_history_changelog_done(clfile.encode()) @@ -65,12 +65,12 @@ def gf_create_string_buffer(size): def gfind_history_changelog(libgfc, changelog_path, start, end, num_parallel, actual_end): return libgfc.gf_history_changelog(changelog_path, start, end, - num_parallel, actual_end) + num_parallel, actual_end) def gfind_changelog_register(libgfc, brick, path, log_file, log_level, retries): return libgfc.gf_changelog_register(brick, path, log_file, - log_level, retries) + log_level, retries) def gfind_history_changelog_done(libgfc, clfile): return libgfc.gf_history_changelog_done(clfile) From 92c94c67578829ee09dba3d1a8106ad7a8d81b35 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:05:08 -0300 Subject: [PATCH 12/72] python linter: clean tools/glusterfind/src/changelog.py This series aim to reduce the python linter build logs. 
This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/changelog.py | 6 +++--- # 1 file changed, 3 insertions(+), 3 deletions(-) # tools/glusterfind/src/changelog.py | 6 +++--- # 1 file changed, 3 insertions(+), 3 deletions(-) --- tools/glusterfind/src/changelog.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/glusterfind/src/changelog.py b/tools/glusterfind/src/changelog.py index a5e9ea4288f..aa73b248e97 100644 --- a/tools/glusterfind/src/changelog.py +++ b/tools/glusterfind/src/changelog.py @@ -287,7 +287,7 @@ def get_changes(brick, hash_dir, log_file, start, end, args): session_dir = os.path.join(conf.get_opt("session_dir"), args.session) status_file = os.path.join(session_dir, args.volume, - "%s.status" % urllib.quote_plus(args.brick)) + "%s.status" % urllib.quote_plus(args.brick)) # Get previous session try: @@ -415,7 +415,7 @@ def _get_args(): action="store_true") parser.add_argument("--output-prefix", help="File prefix in output", default=".") - parser.add_argument("--type",default="both") + parser.add_argument("--type", default="both") parser.add_argument("-N", "--only-namespace-changes", help="List only namespace changes", action="store_true") @@ -435,7 +435,7 @@ def _get_args(): session_dir = os.path.join(conf.get_opt("session_dir"), args.session) status_file = os.path.join(session_dir, args.volume, - "%s.status" % urllib.quote_plus(args.brick)) + "%s.status" % urllib.quote_plus(args.brick)) status_file_pre = status_file + ".pre" mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True, logger=logger) From 4b16b8c6a067b1945a8421204fbc5567a9ed71b4 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:05:06 
-0300 Subject: [PATCH 13/72] python linter: clean tools/glusterfind/src/brickfind.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tools/glusterfind/src/brickfind.py | 11 ++++++----- # 1 file changed, 6 insertions(+), 5 deletions(-) # tools/glusterfind/src/brickfind.py | 11 ++++++----- # 1 file changed, 6 insertions(+), 5 deletions(-) --- tools/glusterfind/src/brickfind.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/glusterfind/src/brickfind.py b/tools/glusterfind/src/brickfind.py index 73b6350188d..de578890a07 100644 --- a/tools/glusterfind/src/brickfind.py +++ b/tools/glusterfind/src/brickfind.py @@ -13,16 +13,17 @@ import sys import logging from argparse import ArgumentParser, RawDescriptionHelpFormatter + try: import urllib.parse as urllib except ImportError: import urllib + import time from utils import mkdirp, setup_logger, create_file, output_write, find import conf - PROG_DESCRIPTION = """ Changelog Crawler """ @@ -51,10 +52,10 @@ def output_callback(path, filter_result, is_dir): field_separator=args.field_separator) else: if (is_dir and args.type == "d") or ( - (not is_dir) and args.type == "f"): + (not is_dir) and args.type == "f"): output_write(fout, path, args.output_prefix, - encode=(not args.no_encode), tag=args.tag, - field_separator=args.field_separator) + encode=(not args.no_encode), tag=args.tag, + field_separator=args.field_separator) ignore_dirs = [os.path.join(brick, dirname) for dirname in @@ -98,7 +99,7 @@ def _get_args(): args = _get_args() session_dir = os.path.join(conf.get_opt("session_dir"), args.session) status_file = os.path.join(session_dir, args.volume, - "%s.status" % 
urllib.quote_plus(args.brick)) + "%s.status" % urllib.quote_plus(args.brick)) status_file_pre = status_file + ".pre" mkdirp(os.path.join(session_dir, args.volume), exit_on_err=True, logger=logger) From 263aab7838de421433451edf102e5421cead785a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 11:53:33 -0300 Subject: [PATCH 14/72] python linter: clean tests/utils/setfattr.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/setfattr.py | 19 +++++++++++-------- # 1 file changed, 11 insertions(+), 8 deletions(-) # tests/utils/setfattr.py | 19 +++++++++++-------- # 1 file changed, 11 insertions(+), 8 deletions(-) --- tests/utils/setfattr.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/tests/utils/setfattr.py b/tests/utils/setfattr.py index 8b7b6abacc0..d8456324681 100755 --- a/tests/utils/setfattr.py +++ b/tests/utils/setfattr.py @@ -1,11 +1,13 @@ - +"""Test utils for setfattr.""" import os import sys from optparse import OptionParser import xattr + def convert(string): + """Decode strings from base64 or hex.""" tmp_string = string if (string[0] == '0' and (string[1] == 's' or @@ -25,6 +27,7 @@ def convert(string): return tmp_string + if __name__ == '__main__': usage = "usage: %prog [-n name] [-v value] [-x name]" parser = OptionParser(usage=usage) @@ -47,18 +50,18 @@ def convert(string): (option, args) = parser.parse_args() if not args: - print ("Usage: setfattr {-n name} [-v value] file...") - print (" setfattr {-x name} file...") - print ("Try `setfattr --help' for more information.") + print("Usage: setfattr {-n name} [-v value] file...") + print(" setfattr {-x 
name} file...") + print("Try `setfattr --help' for more information.") sys.exit(1) if option.name and option.xname: - print ("-n and -x are mutually exclusive...") + print("-n and -x are mutually exclusive...") sys.exit(1) if option.name: if option.value is None: - print ("-n option requires -v value...") + print("-n option requires -v value...") args[0] = os.path.abspath(args[0]) @@ -66,12 +69,12 @@ def convert(string): try: xattr.setxattr(args[0], option.name, convert(option.value)) except Exception as err: - print (err) + print(err) sys.exit(1) if option.xname: try: xattr.removexattr(args[0], option.xname) except Exception as err: - print (err) + print(err) sys.exit(1) From 235c6be5f4bbed06d0a238171b40979cfb991879 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:37:38 -0300 Subject: [PATCH 15/72] python linter: clean tests/utils/py2py3.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/py2py3.py | 3 +++ # 1 file changed, 3 insertions(+) # tests/utils/py2py3.py | 3 +++ # 1 file changed, 3 insertions(+) --- tests/utils/py2py3.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/utils/py2py3.py b/tests/utils/py2py3.py index 63aca10fd26..946cc127d6f 100644 --- a/tests/utils/py2py3.py +++ b/tests/utils/py2py3.py @@ -1,3 +1,4 @@ +"""Test utils py2py3.""" # # Copyright (c) 2018 Red Hat, Inc. # This file is part of GlusterFS. 
@@ -16,9 +17,11 @@ import struct from ctypes import create_string_buffer + def umask(): return os.umask(0) + if sys.version_info >= (3,): def pipe(): (r, w) = os.pipe() From c3affa41cf28751af088af4da9f3b775b1339fb1 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 13:05:56 -0300 Subject: [PATCH 16/72] python linter: clean tests/utils/libcxattr.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/libcxattr.py | 23 +++++++++++++++++++---- # 1 file changed, 19 insertions(+), 4 deletions(-) # tests/utils/libcxattr.py | 23 +++++++++++++++++++---- # 1 file changed, 19 insertions(+), 4 deletions(-) --- tests/utils/libcxattr.py | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/tests/utils/libcxattr.py b/tests/utils/libcxattr.py index 3f3ed1fffbb..2b35bcbb8bc 100644 --- a/tests/utils/libcxattr.py +++ b/tests/utils/libcxattr.py @@ -1,3 +1,4 @@ +"""Test utils libcxattr.""" # # Copyright (c) 2011-2014 Red Hat, Inc. # This file is part of GlusterFS. @@ -11,14 +12,15 @@ import os import sys from ctypes import CDLL, c_int + from py2py3 import bytearray_to_str, gr_create_string_buffer from py2py3 import gr_query_xattr, gr_lsetxattr, gr_lremovexattr class Xattr(object): + """Singleton that wraps the extended attributes system. - """singleton that wraps the extended attributes system - interface for python using ctypes + interface for python using ctypes Just implement it to the degree we need it, in particular - we need just the l*xattr variants, ie. 
we never want symlinks to be @@ -35,6 +37,7 @@ class Xattr(object): @classmethod def geterrno(cls): + """Get errno.""" if sys.hexversion >= 0x02060000: from ctypes import get_errno return get_errno() @@ -43,11 +46,13 @@ def geterrno(cls): @classmethod def raise_oserr(cls): + """Throws exception over errno.""" errn = cls.geterrno() raise OSError(errn, os.strerror(errn)) @classmethod def _query_xattr(cls, path, siz, syscall, *a): + """Get file extended attributes (xattr).""" if siz: buf = gr_create_string_buffer(siz) else: @@ -65,11 +70,15 @@ def _query_xattr(cls, path, siz, syscall, *a): @classmethod def lgetxattr(cls, path, attr, siz=0): + """Get file extended attributes (xattr).""" return gr_query_xattr(cls, path, siz, 'lgetxattr', attr) @classmethod def lgetxattr_buf(cls, path, attr): - """lgetxattr variant with size discovery""" + """Get file extended attributes (xattr). + + lgetxattr variant with size discovery + """ size = cls.lgetxattr(path, attr) if size == -1: cls.raise_oserr() @@ -79,6 +88,7 @@ def lgetxattr_buf(cls, path, attr): @classmethod def llistxattr(cls, path, siz=0): + """List file extended attributes (xattr).""" ret = gr_query_xattr(cls, path, siz, 'llistxattr') if isinstance(ret, str): ret = ret.strip('\0') @@ -87,19 +97,24 @@ def llistxattr(cls, path, siz=0): @classmethod def lsetxattr(cls, path, attr, val): + """List file extended attributes (xattr).""" ret = gr_lsetxattr(cls, path, attr, val) if ret == -1: cls.raise_oserr() @classmethod def lremovexattr(cls, path, attr): + """Remove file extended attributes (xattr).""" ret = gr_lremovexattr(cls, path, attr) if ret == -1: cls.raise_oserr() @classmethod def llistxattr_buf(cls, path): - """listxattr variant with size discovery""" + """List file extended attributes (xattr). 
+ + listxattr variant with size discovery + """ size = cls.llistxattr(path) if size == -1: cls.raise_oserr() From af0a2c86ed48eabdc256ebb8621a26ef60ca497a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 12:14:04 -0300 Subject: [PATCH 17/72] python linter: clean tests/utils/gfid-access.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/gfid-access.py | 45 +++++++++++++++++++++++++++++++-------------- # 1 file changed, 31 insertions(+), 14 deletions(-) # tests/utils/gfid-access.py | 45 +++++++++++++++++++++++++++++++-------------- # 1 file changed, 31 insertions(+), 14 deletions(-) --- tests/utils/gfid-access.py | 45 ++++++++++++++++++++++++++------------ 1 file changed, 31 insertions(+), 14 deletions(-) diff --git a/tests/utils/gfid-access.py b/tests/utils/gfid-access.py index c35c1223df6..d28b0050439 100755 --- a/tests/utils/gfid-access.py +++ b/tests/utils/gfid-access.py @@ -1,3 +1,4 @@ +"""Test utils for gfid-acess.""" # # Copyright (c) 2011-2014 Red Hat, Inc. # This file is part of GlusterFS. 
@@ -9,33 +10,43 @@ # from __future__ import print_function + import os +import random import sys import stat -import time import struct -import random -import libcxattr - +import time from errno import EEXIST +import libcxattr + Xattr = libcxattr.Xattr() + def umask(): + """Wrap os.umask.""" return os.umask(0) + def _fmt_mknod(l): + """Wrap format string for mknod.""" return "!II%dsI%dsIII" % (37, l+1) + def _fmt_mkdir(l): + """Wrap format string for mkdir.""" return "!II%dsI%dsII" % (37, l+1) + def _fmt_symlink(l1, l2): + """Wrap format string for symlink.""" return "!II%dsI%ds%ds" % (37, l1+1, l2+1) if sys.version_info > (3,): def entry_pack_reg(gf, bn, mo, uid, gid): + """Pack reg.""" bn_encoded = bn.encode() blen = len(bn_encoded) return struct.pack(_fmt_mknod(blen), @@ -44,13 +55,16 @@ def entry_pack_reg(gf, bn, mo, uid, gid): # mkdir def entry_pack_dir(gf, bn, mo, uid, gid): + """Pack dir.""" bn_encoded = bn.encode() blen = len(bn_encoded) return struct.pack(_fmt_mkdir(blen), uid, gid, gf.encode(), mo, bn_encoded, stat.S_IMODE(mo), umask()) + # symlink def entry_pack_symlink(gf, bn, lnk, st): + """Pack symlink.""" bn_encoded = bn.encode() blen = len(bn_encoded) lnk_encoded = lnk.encode() @@ -62,18 +76,21 @@ def entry_pack_symlink(gf, bn, lnk, st): else: def entry_pack_reg(gf, bn, mo, uid, gid): + """Pack reg.""" blen = len(bn) return struct.pack(_fmt_mknod(blen), uid, gid, gf, mo, bn, stat.S_IMODE(mo), 0, umask()) def entry_pack_dir(gf, bn, mo, uid, gid): + """Pack dir.""" blen = len(bn) return struct.pack(_fmt_mkdir(blen), uid, gid, gf, mo, bn, stat.S_IMODE(mo), umask()) def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): + """Pack symlink.""" blen = len(bn) llen = len(lnk) return struct.pack(_fmt_symlink(blen, llen), @@ -83,15 +100,15 @@ def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): if len(sys.argv) < 9: print(("USAGE: %s " " " % (sys.argv[0]))) - sys.exit(-1) # nothing to do - mtpt = sys.argv[1] - pargfid = sys.argv[2] - fname = sys.argv[3] + 
sys.exit(-1) # nothing to do + mtpt = sys.argv[1] + pargfid = sys.argv[2] + fname = sys.argv[3] randomgfid = sys.argv[4] - ftype = sys.argv[5] - uid = int(sys.argv[6]) - gid = int(sys.argv[7]) - perm = int(sys.argv[8], 8) + ftype = sys.argv[5] + uid = int(sys.argv[6]) + gid = int(sys.argv[7]) + perm = int(sys.argv[8], 8) os.chdir(mtpt) if pargfid == 'ROOT': @@ -105,10 +122,10 @@ def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): if ftype == 'file': mode = stat.S_IFREG | perm blob = entry_pack_reg(randomgfid, fname, mode, uid, gid) - elif ftype =='dir': + elif ftype == 'dir': mode = stat.S_IFDIR | perm blob = entry_pack_dir(randomgfid, fname, mode, uid, gid) - else: # not yet... + else: # not yet... sys.exit(-1) if blob == None: From 8a54697752df4a589386a30e9ec05d16785f371a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 12:55:49 -0300 Subject: [PATCH 18/72] python linter: clean tests/utils/getfattr.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/getfattr.py | 39 +++++++++++++++++++++++++-------------- # 1 file changed, 25 insertions(+), 14 deletions(-) # tests/utils/getfattr.py | 39 +++++++++++++++++++++++++-------------- # 1 file changed, 25 insertions(+), 14 deletions(-) --- tests/utils/getfattr.py | 39 +++++++++++++++++++++++++-------------- 1 file changed, 25 insertions(+), 14 deletions(-) diff --git a/tests/utils/getfattr.py b/tests/utils/getfattr.py index 3eb40e1c887..2069e5059ce 100755 --- a/tests/utils/getfattr.py +++ b/tests/utils/getfattr.py @@ -1,35 +1,43 @@ +"""Test utils getfattr.""" from __future__ import print_function + import os import sys from optparse import OptionParser import xattr + def handle_textencoding(attr): - ### required for Python's handling of NULL strings. + """Fix null strings.""" + # required for Python's handling of NULL strings. attr_null_replace = (attr.encode('hex').decode('hex')).replace('\x00', '\\000') return attr_null_replace + def getfattr(path, option): + """Get file extended attributes (xattr).""" attr = xattr.getxattr(path, option.name) encoded_attr = attr if option.encoding == "text": - ## special case handle it. + # special case handle it. 
encoded_attr = handle_textencoding(attr) else: encoded_attr = attr.encode(option.encoding) if option.onlyvalues: - print (encoded_attr) + print(encoded_attr) return - print_getfattr (path, option, encoded_attr) + print_getfattr(path, option, encoded_attr) return -def print_getfattr (path, option, encoded_attr=None): + +def print_getfattr(path, option, encoded_attr=None): + """Print file extended attributes (xattr).""" if encoded_attr: if option.encoding == "hex": print(("%s=0x%s" % (option.name, encoded_attr))) @@ -42,13 +50,16 @@ def print_getfattr (path, option, encoded_attr=None): return -def print_header (path, absnames): + +def print_header(path, absnames): + """Print informative header.""" if absnames: print(("# file: %s" % path)) else: - print ("getfattr: Removing leading '/' from absolute path names") + print("getfattr: Removing leading '/' from absolute path names") print(("# file: %s" % path[1:])) + if __name__ == '__main__': usage = "usage: %prog [-n name|-d] [-e en] [-m pattern] path...." 
parser = OptionParser(usage=usage) @@ -82,17 +93,17 @@ def print_header (path, absnames): (option, args) = parser.parse_args() if not args: - print ("Usage: getfattr [-hRLP] [-n name|-d] [-e en] [-m pattern]" - " path...") - print ("Try `getfattr --help' for more information.") + print("Usage: getfattr [-hRLP] [-n name|-d] [-e en] [-m pattern]" + " path...") + print("Try `getfattr --help' for more information.") sys.exit(1) if option.dump and option.name: - print ("-d and -n are mutually exclusive...") + print("-d and -n are mutually exclusive...") sys.exit(1) if option.pattern and option.name: - print ("-m and -n are mutually exclusive...") + print("-m and -n are mutually exclusive...") sys.exit(1) if option.encoding: @@ -113,7 +124,7 @@ def print_header (path, absnames): print(("Invalid key %s" % err)) sys.exit(1) except IOError as err: - print (err) + print(err) sys.exit(1) if option.pattern: @@ -129,5 +140,5 @@ def print_header (path, absnames): print_getfattr(args[0], option, None) except IOError as err: - print (err) + print(err) sys.exit(1) From 41ad1fadae996f507b5d61c014d33a8b13047734 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 13:11:02 -0300 Subject: [PATCH 19/72] python linter: clean tests/utils/pidof.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/pidof.py | 12 ++++++++++-- # 1 file changed, 10 insertions(+), 2 deletions(-) # tests/utils/pidof.py | 12 ++++++++++-- # 1 file changed, 10 insertions(+), 2 deletions(-) --- tests/utils/pidof.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/tests/utils/pidof.py b/tests/utils/pidof.py index 4b7071c0a48..0a10e01e04d 100755 --- a/tests/utils/pidof.py +++ b/tests/utils/pidof.py @@ -1,5 +1,6 @@ - +"""Test utils pidof.""" from __future__ import print_function + import sys try: @@ -8,14 +9,18 @@ print("Please install psutil --> pip install psutil") sys.exit(1) + def pmap_find(p, name): + """Find process memory maps.""" for m in p.memory_maps(grouped=True): if m.path.endswith("%s.so" % name): return True continue return False + def pidof(processname): + """Find pid of gluster processes.""" for p in psutil.process_iter(): if p.pid == 0: continue @@ -30,7 +35,9 @@ def pidof(processname): if processname.strip() == p.name(): print((p.pid)) + def main(argv): + """Run program.""" if len(argv) < 2: sys.stderr.write("Usage: %s \n" % (argv[0],)) return 1 @@ -38,8 +45,9 @@ def main(argv): pidof(argv[1]) except Exception as err: print(err) - sys.stderr.write("Please be root - %s\n" % err); + sys.stderr.write("Please be root - %s\n" % err) sys.exit(1) + if __name__ == "__main__": main(sys.argv) From db4b62e1f466394110e543cce0d5abe1ae0d4047 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:38:22 -0300 Subject: [PATCH 20/72] python linter: clean tests/utils/changelogparser.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/changelogparser.py | 15 +++++++++++++-- # 1 file changed, 13 insertions(+), 2 deletions(-) # tests/utils/changelogparser.py | 15 +++++++++++++-- # 1 file changed, 13 insertions(+), 2 deletions(-) --- tests/utils/changelogparser.py | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/tests/utils/changelogparser.py b/tests/utils/changelogparser.py index 3b8f81d1bad..8481c5cc4ec 100644 --- a/tests/utils/changelogparser.py +++ b/tests/utils/changelogparser.py @@ -1,5 +1,6 @@ # -*- coding: utf-8 -*- -""" +"""Tests utils changelogparser. + Why? Converts this @@ -43,6 +44,8 @@ class NumTokens_V11(object): + """Number tokes for v11.""" + E = 7 M = 3 D = 2 @@ -63,17 +66,24 @@ class NumTokens_V11(object): class NumTokens_V12(NumTokens_V11): + """Number tokes for v12.""" + UNLINK = 5 RMDIR = 5 class Version: + """Versio type.""" + V11 = "v1.1" V12 = "v1.2" class Record(object): + """A record in the changelog.""" + def __init__(self, **kwargs): + """Initialize a Record object.""" self.ts = kwargs.get("ts", None) self.fop_type = kwargs.get("fop_type", None) self.gfid = kwargs.get("gfid", None) @@ -159,7 +169,7 @@ def process_record(data, tokens, changelog_ts, callback): if not changelog_ts: ts1 = int(changelog_ts) else: - ts1="" + ts1 = "" record = Record(ts=ts1, fop_type=data[tokens[0]], gfid=data[tokens[1]]) if data[tokens[0]] == META: @@ -233,4 +243,5 @@ def parse(filename, callback=default_callback): process_record(data, tokens, changelog_ts, callback) tokens = [] + parse(sys.argv[1]) From d6fddc6ac891fd207ba038977b13fd4af893c526 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 12:48:30 -0300 Subject: [PATCH 21/72] python linter: clean tests/utils/create-files.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. 
The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/create-files.py | 37 +++++++++++++++++++++++++++++++++++-- # 1 file changed, 35 insertions(+), 2 deletions(-) # tests/utils/create-files.py | 37 +++++++++++++++++++++++++++++++++++-- # 1 file changed, 35 insertions(+), 2 deletions(-) --- tests/utils/create-files.py | 37 +++++++++++++++++++++++++++++++++++-- 1 file changed, 35 insertions(+), 2 deletions(-) diff --git a/tests/utils/create-files.py b/tests/utils/create-files.py index 04736e9c73b..ab2f4a7b2ed 100755 --- a/tests/utils/create-files.py +++ b/tests/utils/create-files.py @@ -1,14 +1,17 @@ +"""Test utils create-files.""" # This script was developed by Vijaykumar Koppad (vkoppad@redhat.com) # The latest version of this script can found at # http://github.com/vijaykumar-koppad/crefi from __future__ import with_statement + import os import re import sys import time import errno + import xattr import string import random @@ -19,13 +22,17 @@ datsiz = 0 timr = 0 + def get_ascii_upper_alpha_digits(): - if sys.version_info > (3,0): + """Get alphanumeric characters [a-Z0-9].""" + if sys.version_info > (3, 0): return string.ascii_uppercase+string.digits else: return string.uppercase+string.digits + def setLogger(filename): + """Enable global logger.""" global logger logger = logging.getLogger(filename) logger.setLevel(logging.DEBUG) @@ -33,6 +40,7 @@ def setLogger(filename): def setupLogger(filename): + """Configure logger.""" logger = logging.getLogger(filename) logger.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - %(message)s') @@ -44,6 +52,7 @@ def setupLogger(filename): def os_rd(src, size): + """Read up to size from src.""" global datsiz fd = os.open(src, os.O_RDONLY) data = os.read(fd, size) @@ -53,6 +62,7 @@ def os_rd(src, size): def os_wr(dest, data): + """Write 
data to dest.""" global timr st = time.time() fd = os.open(dest, os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644) @@ -64,6 +74,7 @@ def os_wr(dest, data): def create_sparse_file(fil, size, mins, maxs, rand): + """Create sparse zero-filled file.""" if rand: size = random.randint(mins, maxs) else: @@ -74,6 +85,7 @@ def create_sparse_file(fil, size, mins, maxs, rand): def create_binary_file(fil, size, mins, maxs, rand): + """Create binary random-filled file.""" if rand: size = random.randint(mins, maxs) else: @@ -84,6 +96,7 @@ def create_binary_file(fil, size, mins, maxs, rand): def create_txt_file(fil, size, mins, maxs, rand): + """Create text (txt) file.""" if rand: size = random.randint(mins, maxs) if size < 500*1024: @@ -101,6 +114,7 @@ def create_txt_file(fil, size, mins, maxs, rand): def create_tar_file(fil, size, mins, maxs, rand): + """Create tar file.""" if rand: size = random.randint(mins, maxs) else: @@ -115,6 +129,7 @@ def create_tar_file(fil, size, mins, maxs, rand): def get_filename(flen): + """Get a file name.""" size = flen char = get_ascii_upper_alpha_digits() st = ''.join(random.choice(char) for i in range(size)) @@ -124,6 +139,7 @@ def get_filename(flen): def text_files(files, file_count, inter, size, mins, maxs, rand, flen, randname, dir_path): + """Count text files.""" global datsiz, timr for k in range(files): if not file_count % inter: @@ -139,6 +155,7 @@ def text_files(files, file_count, inter, size, mins, maxs, rand, def sparse_files(files, file_count, inter, size, mins, maxs, rand, flen, randname, dir_path): + """Count sparse files.""" for k in range(files): if not file_count % inter: logger.info("Total files created -- "+str(file_count)) @@ -153,6 +170,7 @@ def sparse_files(files, file_count, inter, size, mins, maxs, def binary_files(files, file_count, inter, size, mins, maxs, rand, flen, randname, dir_path): + """Count binary files.""" for k in range(files): if not file_count % inter: logger.info("Total files created -- "+str(file_count)) @@ 
-167,6 +185,7 @@ def binary_files(files, file_count, inter, size, mins, maxs, def tar_files(files, file_count, inter, size, mins, maxs, rand, flen, randname, dir_path): + """Count tar files.""" for k in range(files): if not file_count % inter: logger.info("Total files created -- "+str(file_count)) @@ -180,6 +199,7 @@ def tar_files(files, file_count, inter, size, mins, maxs, def setxattr_files(files, randname, dir_path): + """Set or get extended attributes (xattr) in files.""" char = get_ascii_upper_alpha_digits() if not randname: for k in range(files): @@ -196,6 +216,7 @@ def setxattr_files(files, randname, dir_path): def rename_files(files, flen, randname, dir_path): + """Rename files.""" if not randname: for k in range(files): os.rename(dir_path + "/" + "file" + str(k), @@ -211,6 +232,7 @@ def rename_files(files, flen, randname, dir_path): def truncate_files(files, mins, maxs, randname, dir_path): + """Truncate files.""" if not randname: for k in range(files): byts = random.randint(mins, maxs) @@ -229,6 +251,7 @@ def truncate_files(files, mins, maxs, randname, dir_path): def chmod_files(files, flen, randname, dir_path): + """Change permission mode bits on files.""" if not randname: for k in range(files): mod = random.randint(0, 511) @@ -240,12 +263,16 @@ def chmod_files(files, flen, randname, dir_path): os.chmod(dir_path+"/"+fil, mod) return + def random_og(path): + """Set random ownwer on file.""" u = random.randint(1025, 65536) g = -1 os.chown(path, u, g) + def chown_files(files, flen, randname, dir_path): + """Change owner of files.""" if not randname: for k in range(files): random_og(dir_path+"/"+"file"+str(k)) @@ -257,6 +284,7 @@ def chown_files(files, flen, randname, dir_path): def chgrp_files(files, flen, randname, dir_path): + """Change group owner of files.""" if not randname: for k in range(files): random_og(dir_path+"/"+"file"+str(k)) @@ -268,6 +296,7 @@ def chgrp_files(files, flen, randname, dir_path): def symlink_files(files, flen, randname, 
dir_path): + """Create symlinks.""" try: os.makedirs(dir_path+"/"+"symlink_to_files") except OSError as ex: @@ -288,6 +317,7 @@ def symlink_files(files, flen, randname, dir_path): def hardlink_files(files, flen, randname, dir_path): + """Create hardlinks.""" try: os.makedirs(dir_path+"/"+"hardlink_to_files") except OSError as ex: @@ -309,6 +339,7 @@ def hardlink_files(files, flen, randname, dir_path): def human2bytes(size): + """Convert human-readable size to bytes.""" size_short = { 1024: ['K', 'KB', 'KiB', 'k', 'kB', 'kiB'], 1024*1024: ['M', 'MB', 'MiB'], @@ -326,6 +357,7 @@ def human2bytes(size): def bytes2human(byts): + """Convert bytes to human-readable size.""" abbr = { 1 << 30: "GB", 1 << 20: "MB", @@ -343,6 +375,7 @@ def bytes2human(byts): def multipledir(mnt_pnt, brdth, depth, files, fop, file_type="text", inter="1000", size="100K", mins="10K", maxs="500K", rand=False, l=10, randname=False): + """Test multiple directories.""" files_count = 1 size = human2bytes(size) maxs = human2bytes(maxs) @@ -458,7 +491,7 @@ def multipledir(mnt_pnt, brdth, depth, files, fop, file_type="text", def singledir(mnt_pnt, files, fop, file_type="text", inter="1000", size="100K", mins="10K", maxs="500K", rand=False, l=10, randname=False): - + """Test a single directory.""" files_count = 1 size = human2bytes(size) maxs = human2bytes(maxs) From eb784a2b9f55ef6c9708c1170601ed50367567f7 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 14:06:35 -0300 Subject: [PATCH 22/72] python linter: clean tests/features/ipctest.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/features/ipctest.py | 36 ++++++++++++++++++++---------------- # 1 file changed, 20 insertions(+), 16 deletions(-) # tests/features/ipctest.py | 36 ++++++++++++++++++++---------------- # 1 file changed, 20 insertions(+), 16 deletions(-) --- tests/features/ipctest.py | 36 ++++++++++++++++++++---------------- 1 file changed, 20 insertions(+), 16 deletions(-) diff --git a/tests/features/ipctest.py b/tests/features/ipctest.py index f6f699cf5c4..3a8f5624cfd 100755 --- a/tests/features/ipctest.py +++ b/tests/features/ipctest.py @@ -1,28 +1,32 @@ - +"""Test utils ipctest.""" from __future__ import print_function + import ctypes api = ctypes.CDLL("libgfapi.so", mode=ctypes.RTLD_GLOBAL) -api.glfs_ipc.argtypes = [ ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p ] +api.glfs_ipc.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p] api.glfs_ipc.restype = ctypes.c_int -def do_ipc (host, volume): - fs = api.glfs_new(volume) - #api.glfs_set_logging(fs, "/dev/stderr", 7) - api.glfs_set_volfile_server(fs, "tcp", host, 24007) - api.glfs_init(fs) - ret = api.glfs_ipc(fs, 1470369258, 0, 0) - api.glfs_fini(fs) +def do_ipc(host, volume): + """Do one ipc using the api.""" + fs = api.glfs_new(volume) + # api.glfs_set_logging(fs, "/dev/stderr", 7) + api.glfs_set_volfile_server(fs, "tcp", host, 24007) + + api.glfs_init(fs) + ret = api.glfs_ipc(fs, 1470369258, 0, 0) + api.glfs_fini(fs) + + return ret - return ret if __name__ == "__main__": - import sys + import sys - try: - res = do_ipc(*sys.argv[1:3]) - print(res) - except: - print("IPC failed (volume not started?)") + try: + res = do_ipc(*sys.argv[1:3]) + print(res) + except: + print("IPC failed (volume not started?)") From 352a74f02cda421ccc22c134c21ee6568bb659d8 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 14:19:30 -0300 Subject: [PATCH 23/72] python linter: clean tests/bugs/nfs/socket-as-fifo.py 
This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # tests/bugs/nfs/socket-as-fifo.py | 10 ++++++---- # 1 file changed, 6 insertions(+), 4 deletions(-) # tests/bugs/nfs/socket-as-fifo.py | 10 ++++++---- # 1 file changed, 6 insertions(+), 4 deletions(-) --- tests/bugs/nfs/socket-as-fifo.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/tests/bugs/nfs/socket-as-fifo.py b/tests/bugs/nfs/socket-as-fifo.py index eb507e1d30b..af33344ce18 100755 --- a/tests/bugs/nfs/socket-as-fifo.py +++ b/tests/bugs/nfs/socket-as-fifo.py @@ -1,3 +1,4 @@ +"""Tests for bugs nfs socket-as-fifo.""" # # Create a unix domain socket and test if it is a socket (and not a fifo/pipe). # @@ -5,16 +6,17 @@ # from __future__ import print_function + import os +import socket import stat import sys -import socket ret = 1 if len(sys.argv) != 2: - print('Usage: %s ' % (sys.argv[0])) - sys.exit(ret) + print('Usage: %s ' % (sys.argv[0])) + sys.exit(ret) path = sys.argv[1] @@ -25,7 +27,7 @@ mode = stbuf.st_mode if stat.S_ISSOCK(mode): - ret = 0 + ret = 0 sock.close() os.unlink(path) From 7722216e47b3be7cbd686679e339f47e710aeb20 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 14:36:29 -0300 Subject: [PATCH 24/72] python linter: clean tests/bugs/distribute/overlap.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/bugs/distribute/overlap.py | 15 +++++++++++---- # 1 file changed, 11 insertions(+), 4 deletions(-) # tests/bugs/distribute/overlap.py | 15 +++++++++++---- # 1 file changed, 11 insertions(+), 4 deletions(-) --- tests/bugs/distribute/overlap.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/tests/bugs/distribute/overlap.py b/tests/bugs/distribute/overlap.py index 2813979787b..174c98f45c4 100755 --- a/tests/bugs/distribute/overlap.py +++ b/tests/bugs/distribute/overlap.py @@ -1,21 +1,27 @@ +"""Test bugs distribute overlap.""" from __future__ import print_function + import sys -def calculate_one (ov, nv): + +def calculate_one(ov, nv): + """Test for one overlapping region.""" old_start = int(ov[18:26], 16) old_end = int(ov[26:34], 16) new_start = int(nv[18:26], 16) new_end = int(nv[26:34], 16) if (new_end < old_start) or (new_start > old_end): - #print '%s, %s -> ZERO' % (ov, nv) + # print '%s, %s -> ZERO' % (ov, nv) return 0 all_start = max(old_start, new_start) all_end = min(old_end, new_end) - #print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1) + # print '%s, %s -> %08x' % (ov, nv, all_end - all_start + 1) return all_end - all_start + 1 -def calculate_all (values): + +def calculate_all(values): + """Test for all overlapping region.""" total = 0 nv_index = len(values) // 2 for old_val in values[:nv_index]: @@ -24,6 +30,7 @@ def calculate_all (values): total += calculate_one(old_val, new_val) return total + """ test1_vals = [ '0x0000000000000000000000003fffffff', # first quarter From fc8acec13826cf110bf9bf2e297f852b96fcd618 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 14:58:44 -0300 Subject: [PATCH 25/72] python linter: clean libglusterfs/src/generator.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. 
The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # libglusterfs/src/generator.py | 956 +++++++++++++++++++++--------------------- # 1 file changed, 480 insertions(+), 476 deletions(-) # libglusterfs/src/generator.py | 956 +++++++++++++++++++++--------------------- # 1 file changed, 480 insertions(+), 476 deletions(-) --- libglusterfs/src/generator.py | 956 +++++++++++++++++----------------- 1 file changed, 480 insertions(+), 476 deletions(-) diff --git a/libglusterfs/src/generator.py b/libglusterfs/src/generator.py index 5b7aa4764a0..1b13b3a74c7 100755 --- a/libglusterfs/src/generator.py +++ b/libglusterfs/src/generator.py @@ -66,537 +66,537 @@ xlator_dumpops = {} ops['fgetxattr'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'name', 'const char *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'dict', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'name', 'const char *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'dict', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['fsetxattr'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'dict', 'dict_t *', 'xattr'), - ('fop-arg', 'flags', 'int32_t', 'flags'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'dict', 'dict_t *', 'xattr'), + ('fop-arg', 'flags', 'int32_t', 'flags'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['setxattr'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'dict', 'dict_t *', 'xattr'), - ('fop-arg', 'flags', 'int32_t', 'flags'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'inode-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + 
('fop-arg', 'dict', 'dict_t *', 'xattr'), + ('fop-arg', 'flags', 'int32_t', 'flags'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'inode-op'), ) ops['statfs'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'buf', 'struct statvfs *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'buf', 'struct statvfs *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['fsyncdir'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'flags', 'int32_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'flags', 'int32_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['opendir'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'fd', 'fd_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'fd', 'fd_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['fstat'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['fsync'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'flags', 'int32_t'), - ('extra', 'preop', 'struct iatt', '&preop'), - ('extra', 'postop', 'struct iatt', '&postop'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'prebuf', 'struct iatt *'), - ('cbk-arg', 'postbuf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'flags', 'int32_t'), + ('extra', 'preop', 'struct iatt', '&preop'), + ('extra', 'postop', 'struct iatt', '&postop'), + ('fop-arg', 'xdata', 'dict_t *'), 
+ ('cbk-arg', 'prebuf', 'struct iatt *'), + ('cbk-arg', 'postbuf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['flush'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['writev'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'vector', 'struct iovec *', 'vector'), - ('fop-arg', 'count', 'int32_t'), - ('fop-arg', 'off', 'off_t', 'offset'), - ('fop-arg', 'flags', 'uint32_t', 'flags'), - ('fop-arg', 'iobref', 'struct iobref *'), - ('extra', 'preop', 'struct iatt', '&preop'), - ('extra', 'postop', 'struct iatt', '&postop'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'prebuf', 'struct iatt *'), - ('cbk-arg', 'postbuf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'vector', 'struct iovec *', 'vector'), + ('fop-arg', 'count', 'int32_t'), + ('fop-arg', 'off', 'off_t', 'offset'), + ('fop-arg', 'flags', 'uint32_t', 'flags'), + ('fop-arg', 'iobref', 'struct iobref *'), + ('extra', 'preop', 'struct iatt', '&preop'), + ('extra', 'postop', 'struct iatt', '&postop'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'prebuf', 'struct iatt *'), + ('cbk-arg', 'postbuf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['readv'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'size', 'size_t'), - ('fop-arg', 'offset', 'off_t'), - ('fop-arg', 'flags', 'uint32_t'), - ('extra', 'iatt', 'struct iatt', '&iatt'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'vector', 'struct iovec *'), - ('cbk-arg', 'count', 'int32_t'), - ('cbk-arg', 'stbuf', 'struct iatt *'), - ('cbk-arg', 'iobref', 'struct iobref *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'size', 'size_t'), + ('fop-arg', 'offset', 'off_t'), + ('fop-arg', 
'flags', 'uint32_t'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'vector', 'struct iovec *'), + ('cbk-arg', 'count', 'int32_t'), + ('cbk-arg', 'stbuf', 'struct iatt *'), + ('cbk-arg', 'iobref', 'struct iobref *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['open'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'flags', 'int32_t'), - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'fd', 'fd_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'flags', 'int32_t'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'fd', 'fd_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['create'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'flags', 'int32_t', 'flags'), - ('fop-arg', 'mode', 'mode_t', 'mode'), - ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('extra', 'iatt', 'struct iatt', '&iatt'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'fd', 'fd_t *'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), - ('link', 'loc.inode', '&iatt'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'flags', 'int32_t', 'flags'), + ('fop-arg', 'mode', 'mode_t', 'mode'), + ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'fd', 'fd_t *'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), + ('link', 'loc.inode', '&iatt'), ) ops['link'] = ( - ('fop-arg', 'oldloc', 'loc_t *', 
'loc'), - ('fop-arg', 'newloc', 'loc_t *', 'loc2'), - ('extra', 'iatt', 'struct iatt', '&iatt'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'oldloc', 'loc_t *', 'loc'), + ('fop-arg', 'newloc', 'loc_t *', 'loc2'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['rename'] = ( - ('fop-arg', 'oldloc', 'loc_t *', 'loc'), - ('fop-arg', 'newloc', 'loc_t *', 'loc2'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preoldparent', 'struct iatt *'), - ('cbk-arg', 'postoldparent', 'struct iatt *'), - ('cbk-arg', 'prenewparent', 'struct iatt *'), - ('cbk-arg', 'postnewparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'oldloc', 'loc_t *', 'loc'), + ('fop-arg', 'newloc', 'loc_t *', 'loc2'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preoldparent', 'struct iatt *'), + ('cbk-arg', 'postoldparent', 'struct iatt *'), + ('cbk-arg', 'prenewparent', 'struct iatt *'), + ('cbk-arg', 'postnewparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['symlink'] = ( - ('fop-arg', 'linkpath', 'const char *', 'linkname'), - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'umask', 'mode_t', 'mode', 'nosync'), - ('extra', 'iatt', 'struct iatt', '&iatt'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - 
('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'linkpath', 'const char *', 'linkname'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'umask', 'mode_t', 'mode', 'nosync'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['rmdir'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'flags', 'int32_t', 'flags'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'flags', 'int32_t', 'flags'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['unlink'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'flags', 'int32_t', 'flags', 'nosync'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'flags', 'int32_t', 'flags', 'nosync'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['mkdir'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'mode', 'mode_t', 'mode'), - ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), - ('extra', 'iatt', 'struct iatt', 
'&iatt'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), - ('link', 'loc.inode', '&iatt'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'mode', 'mode_t', 'mode'), + ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), + ('link', 'loc.inode', '&iatt'), ) ops['mknod'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'mode', 'mode_t', 'mode'), - ('fop-arg', 'rdev', 'dev_t', 'rdev'), - ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), - ('extra', 'iatt', 'struct iatt', '&iatt'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'entry-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'mode', 'mode_t', 'mode'), + ('fop-arg', 'rdev', 'dev_t', 'rdev'), + ('fop-arg', 'umask', 'mode_t', 'umask', 'nosync'), + ('extra', 'iatt', 'struct iatt', '&iatt'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'entry-op'), ) ops['readlink'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'size', 'size_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'path', 'const char *'), - ('cbk-arg', 'buf', 
'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'size', 'size_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'path', 'const char *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['access'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'mask', 'int32_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'mask', 'int32_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['ftruncate'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'offset', 'off_t', 'offset'), - ('extra', 'preop', 'struct iatt', '&preop'), - ('extra', 'postop', 'struct iatt', '&postop'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'prebuf', 'struct iatt *'), - ('cbk-arg', 'postbuf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'offset', 'off_t', 'offset'), + ('extra', 'preop', 'struct iatt', '&preop'), + ('extra', 'postop', 'struct iatt', '&postop'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'prebuf', 'struct iatt *'), + ('cbk-arg', 'postbuf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['getxattr'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'name', 'const char *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'dict', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'name', 'const char *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'dict', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['xattrop'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'flags', 'gf_xattrop_flags_t', 'optype'), - ('fop-arg', 'dict', 'dict_t *', 'xattr'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'dict', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 
'inode-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'flags', 'gf_xattrop_flags_t', 'optype'), + ('fop-arg', 'dict', 'dict_t *', 'xattr'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'dict', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'inode-op'), ) ops['fxattrop'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'flags', 'gf_xattrop_flags_t', 'optype'), - ('fop-arg', 'dict', 'dict_t *', 'xattr'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'dict', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'flags', 'gf_xattrop_flags_t', 'optype'), + ('fop-arg', 'dict', 'dict_t *', 'xattr'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'dict', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['removexattr'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'name', 'const char *', 'name'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'inode-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'name', 'const char *', 'name'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'inode-op'), ) ops['fremovexattr'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'name', 'const char *', 'name'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'name', 'const char *', 'name'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['lk'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'cmd', 'int32_t'), - ('fop-arg', 'lock', 'struct gf_flock *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'lock', 'struct gf_flock *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'cmd', 'int32_t'), + 
('fop-arg', 'lock', 'struct gf_flock *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'lock', 'struct gf_flock *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['inodelk'] = ( - ('fop-arg', 'volume', 'const char *'), - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'cmd', 'int32_t'), - ('fop-arg', 'lock', 'struct gf_flock *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'volume', 'const char *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'cmd', 'int32_t'), + ('fop-arg', 'lock', 'struct gf_flock *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['finodelk'] = ( - ('fop-arg', 'volume', 'const char *'), - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'cmd', 'int32_t'), - ('fop-arg', 'lock', 'struct gf_flock *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'volume', 'const char *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'cmd', 'int32_t'), + ('fop-arg', 'lock', 'struct gf_flock *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['entrylk'] = ( - ('fop-arg', 'volume', 'const char *'), - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'basename', 'const char *'), - ('fop-arg', 'cmd', 'entrylk_cmd'), - ('fop-arg', 'type', 'entrylk_type'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'volume', 'const char *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'basename', 'const char *'), + ('fop-arg', 'cmd', 'entrylk_cmd'), + ('fop-arg', 'type', 'entrylk_type'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['fentrylk'] = ( - ('fop-arg', 'volume', 'const char *'), - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'basename', 'const char *'), - ('fop-arg', 'cmd', 'entrylk_cmd'), - ('fop-arg', 'type', 'entrylk_type'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'volume', 'const char *'), + ('fop-arg', 'fd', 'fd_t *'), + 
('fop-arg', 'basename', 'const char *'), + ('fop-arg', 'cmd', 'entrylk_cmd'), + ('fop-arg', 'type', 'entrylk_type'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['rchecksum'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'offset', 'off_t'), - ('fop-arg', 'len', 'int32_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'weak_cksum', 'uint32_t'), - ('cbk-arg', 'strong_cksum', 'uint8_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'offset', 'off_t'), + ('fop-arg', 'len', 'int32_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'weak_cksum', 'uint32_t'), + ('cbk-arg', 'strong_cksum', 'uint8_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['readdir'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'size', 'size_t'), - ('fop-arg', 'off', 'off_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'entries', 'gf_dirent_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'size', 'size_t'), + ('fop-arg', 'off', 'off_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'entries', 'gf_dirent_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['readdirp'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'size', 'size_t'), - ('fop-arg', 'off', 'off_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'entries', 'gf_dirent_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'size', 'size_t'), + ('fop-arg', 'off', 'off_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'entries', 'gf_dirent_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['setattr'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'stbuf', 'struct iatt *', 'stat'), - ('fop-arg', 'valid', 'int32_t', 'valid'), - ('extra', 'preop', 'struct iatt', '&preop'), - ('extra', 'postop', 'struct iatt', '&postop'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'statpre', 'struct iatt *'), - ('cbk-arg', 'statpost', 'struct iatt *'), - 
('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'inode-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'stbuf', 'struct iatt *', 'stat'), + ('fop-arg', 'valid', 'int32_t', 'valid'), + ('extra', 'preop', 'struct iatt', '&preop'), + ('extra', 'postop', 'struct iatt', '&postop'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'statpre', 'struct iatt *'), + ('cbk-arg', 'statpost', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'inode-op'), ) ops['truncate'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'offset', 'off_t', 'offset'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'prebuf', 'struct iatt *'), - ('cbk-arg', 'postbuf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'inode-op'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'offset', 'off_t', 'offset'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'prebuf', 'struct iatt *'), + ('cbk-arg', 'postbuf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'inode-op'), ) ops['stat'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['lookup'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - # We could add xdata everywhere automatically if somebody hadn't put - # something after it here. - ('cbk-arg', 'postparent', 'struct iatt *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + # We could add xdata everywhere automatically if somebody hadn't put + # something after it here. 
+ ('cbk-arg', 'postparent', 'struct iatt *'), ) ops['fsetattr'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'stbuf', 'struct iatt *', 'stat'), - ('fop-arg', 'valid', 'int32_t', 'valid'), - ('extra', 'preop', 'struct iatt', '&preop'), - ('extra', 'postop', 'struct iatt', '&postop'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'statpre', 'struct iatt *'), - ('cbk-arg', 'statpost', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'stbuf', 'struct iatt *', 'stat'), + ('fop-arg', 'valid', 'int32_t', 'valid'), + ('extra', 'preop', 'struct iatt', '&preop'), + ('extra', 'postop', 'struct iatt', '&postop'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'statpre', 'struct iatt *'), + ('cbk-arg', 'statpost', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['fallocate'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'keep_size', 'int32_t', 'mode'), - ('fop-arg', 'offset', 'off_t', 'offset'), - ('fop-arg', 'len', 'size_t', 'size'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'pre', 'struct iatt *'), - ('cbk-arg', 'post', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'keep_size', 'int32_t', 'mode'), + ('fop-arg', 'offset', 'off_t', 'offset'), + ('fop-arg', 'len', 'size_t', 'size'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'pre', 'struct iatt *'), + ('cbk-arg', 'post', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['discard'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'offset', 'off_t', 'offset'), - ('fop-arg', 'len', 'size_t', 'size'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'pre', 'struct iatt *'), - ('cbk-arg', 'post', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + 
('fop-arg', 'offset', 'off_t', 'offset'), + ('fop-arg', 'len', 'size_t', 'size'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'pre', 'struct iatt *'), + ('cbk-arg', 'post', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['zerofill'] = ( - ('fop-arg', 'fd', 'fd_t *', 'fd'), - ('fop-arg', 'offset', 'off_t', 'offset'), - # As e.g. fallocate/discard (above) "len" should really be a size_t. - ('fop-arg', 'len', 'off_t', 'size'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'pre', 'struct iatt *'), - ('cbk-arg', 'post', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'fd', 'fd_t *', 'fd'), + ('fop-arg', 'offset', 'off_t', 'offset'), + # As e.g. fallocate/discard (above) "len" should really be a size_t. + ('fop-arg', 'len', 'off_t', 'size'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'pre', 'struct iatt *'), + ('cbk-arg', 'post', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['ipc'] = ( - ('fop-arg', 'op', 'int32_t'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'xdata', 'dict_t *'), - ('journal', 'fd-op'), + ('fop-arg', 'op', 'int32_t'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'xdata', 'dict_t *'), + ('journal', 'fd-op'), ) ops['seek'] = ( - ('fop-arg', 'fd', 'fd_t *'), - ('fop-arg', 'offset', 'off_t'), - ('fop-arg', 'what', 'gf_seek_what_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'offset', 'off_t'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'fd', 'fd_t *'), + ('fop-arg', 'offset', 'off_t'), + ('fop-arg', 'what', 'gf_seek_what_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'offset', 'off_t'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['getspec'] = ( - ('fop-arg', 'key', 'const char *'), - ('fop-arg', 'flags', 'int32_t'), - ('cbk-arg', 'spec_data', 'char *'), + ('fop-arg', 'key', 'const char *'), + ('fop-arg', 'flags', 'int32_t'), + ('cbk-arg', 
'spec_data', 'char *'), ) ops['lease'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'lease', 'struct gf_lease *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'lease', 'struct gf_lease *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'lease', 'struct gf_lease *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'lease', 'struct gf_lease *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['getactivelk'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'locklist', 'lock_migration_info_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'locklist', 'lock_migration_info_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['setactivelk'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'locklist', 'lock_migration_info_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'locklist', 'lock_migration_info_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['put'] = ( - ('fop-arg', 'loc', 'loc_t *', 'loc'), - ('fop-arg', 'mode', 'mode_t', 'mode'), - ('fop-arg', 'umask', 'mode_t', 'umask'), - ('fop-arg', 'flags', 'uint32_t', 'flags'), - ('fop-arg', 'vector', 'struct iovec *', 'vector'), - ('fop-arg', 'count', 'int32_t'), - ('fop-arg', 'off', 'off_t', 'offset'), - ('fop-arg', 'iobref', 'struct iobref *'), - ('fop-arg', 'dict', 'dict_t *', 'xattr'), - ('fop-arg', 'xdata', 'dict_t *', 'xdata'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'preparent', 'struct iatt *'), - ('cbk-arg', 'postparent', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *', 'loc'), + ('fop-arg', 'mode', 'mode_t', 'mode'), + ('fop-arg', 'umask', 'mode_t', 'umask'), + ('fop-arg', 'flags', 'uint32_t', 'flags'), + ('fop-arg', 'vector', 'struct iovec *', 'vector'), + 
('fop-arg', 'count', 'int32_t'), + ('fop-arg', 'off', 'off_t', 'offset'), + ('fop-arg', 'iobref', 'struct iobref *'), + ('fop-arg', 'dict', 'dict_t *', 'xattr'), + ('fop-arg', 'xdata', 'dict_t *', 'xdata'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'preparent', 'struct iatt *'), + ('cbk-arg', 'postparent', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['icreate'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'mode', 'mode_t'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'inode', 'inode_t *'), - ('cbk-arg', 'buf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'mode', 'mode_t'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'inode', 'inode_t *'), + ('cbk-arg', 'buf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['namelink'] = ( - ('fop-arg', 'loc', 'loc_t *'), - ('fop-arg', 'xdata', 'dict_t *'), - ('cbk-arg', 'prebuf', 'struct iatt *'), - ('cbk-arg', 'postbuf', 'struct iatt *'), - ('cbk-arg', 'xdata', 'dict_t *'), + ('fop-arg', 'loc', 'loc_t *'), + ('fop-arg', 'xdata', 'dict_t *'), + ('cbk-arg', 'prebuf', 'struct iatt *'), + ('cbk-arg', 'postbuf', 'struct iatt *'), + ('cbk-arg', 'xdata', 'dict_t *'), ) ops['copy_file_range'] = ( @@ -614,164 +614,168 @@ ) ##################################################################### xlator_cbks['forget'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'inode', 'inode_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'inode', 'inode_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['release'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'fd', 'fd_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'fd', 'fd_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['releasedir'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'fd', 'fd_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 
'xlator_t *'), + ('fn-arg', 'fd', 'fd_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['invalidate'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'inode', 'inode_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'inode', 'inode_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['client_destroy'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'client', 'client_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'client', 'client_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['client_disconnect'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'client', 'client_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'client', 'client_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_cbks['ictxmerge'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'fd', 'fd_t *'), - ('fn-arg', 'inode', 'inode_t *'), - ('fn-arg', 'linked_inode', 'inode_t *'), - ('ret-val', 'void', ''), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'fd', 'fd_t *'), + ('fn-arg', 'inode', 'inode_t *'), + ('fn-arg', 'linked_inode', 'inode_t *'), + ('ret-val', 'void', ''), ) ##################################################################### xlator_dumpops['priv'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['inode'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['fd'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['inodectx'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'ino', 'inode_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'ino', 'inode_t *'), + ('ret-val', 'int32_t', '0'), ) 
xlator_dumpops['fdctx'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'fd', 'fd_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'fd', 'fd_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['priv_to_dict'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'dict', 'dict_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'dict', 'dict_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['inode_to_dict'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'dict', 'dict_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'dict', 'dict_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['fd_to_dict'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'dict', 'dict_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'dict', 'dict_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['inodectx_to_dict'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'ino', 'inode_t *'), - ('fn-arg', 'dict', 'dict_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'ino', 'inode_t *'), + ('fn-arg', 'dict', 'dict_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['fdctx_to_dict'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('fn-arg', 'fd', 'fd_t *'), - ('fn-arg', 'dict', 'dict_t *'), - ('ret-val', 'int32_t', '0'), + ('fn-arg', 'this', 'xlator_t *'), + ('fn-arg', 'fd', 'fd_t *'), + ('fn-arg', 'dict', 'dict_t *'), + ('ret-val', 'int32_t', '0'), ) xlator_dumpops['history'] = ( - ('fn-arg', 'this', 'xlator_t *'), - ('ret-val', 'int32_t', '0'), -) - -def get_error_arg (type_str): - if type_str.find(" *") != -1: - return "NULL" - return "-1" - -def get_subs (names, types, cbktypes=None): - sdict = {} - sdict["@SHORT_ARGS@"] = ', '.join(names) - # Convert two separate tuples to one of (name, type) sub-tuples. 
- as_tuples = list(zip(types, names)) - # Convert each sub-tuple into a "type name" string. - as_strings = [' '.join(item) for item in as_tuples] - # Join all of those into one big string. - sdict["@LONG_ARGS@"] = ',\n\t'.join(as_strings) - # So much more readable than string.join(map(string.join,zip(...)))) - sdict["@ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, types))) - if cbktypes is not None: - sdict["@CBK_ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, cbktypes))) - return sdict - -def generate (tmpl, name, subs): - text = tmpl.replace("@NAME@", name) - if name == "writev": - # More spurious inconsistency. - text = text.replace("@UPNAME@", "WRITE") - elif name == "readv": - text = text.replace("@UPNAME@", "READ") - else: - text = text.replace("@UPNAME@", name.upper()) - for old, new in subs[name].items(): - text = text.replace(old, new) - # TBD: reindent/reformat the result for maximum readability. - return text + ('fn-arg', 'this', 'xlator_t *'), + ('ret-val', 'int32_t', '0'), +) + + +def get_error_arg(type_str): + if type_str.find(" *") != -1: + return "NULL" + return "-1" + + +def get_subs(names, types, cbktypes=None): + sdict = {} + sdict["@SHORT_ARGS@"] = ', '.join(names) + # Convert two separate tuples to one of (name, type) sub-tuples. + as_tuples = list(zip(types, names)) + # Convert each sub-tuple into a "type name" string. + as_strings = [' '.join(item) for item in as_tuples] + # Join all of those into one big string. + sdict["@LONG_ARGS@"] = ',\n\t'.join(as_strings) + # So much more readable than string.join(map(string.join,zip(...)))) + sdict["@ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, types))) + if cbktypes is not None: + sdict["@CBK_ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, cbktypes))) + return sdict + + +def generate(tmpl, name, subs): + text = tmpl.replace("@NAME@", name) + if name == "writev": + # More spurious inconsistency. 
+ text = text.replace("@UPNAME@", "WRITE") + elif name == "readv": + text = text.replace("@UPNAME@", "READ") + else: + text = text.replace("@UPNAME@", name.upper()) + for old, new in subs[name].items(): + text = text.replace(old, new) + # TBD: reindent/reformat the result for maximum readability. + return text + fop_subs = {} cbk_subs = {} for name, args in ops.items(): - # Create the necessary substitution strings for fops. - arg_names = [ a[1] for a in args if a[0] == 'fop-arg'] - arg_types = [ a[2] for a in args if a[0] == 'fop-arg'] - cbk_types = [ a[2] for a in args if a[0] == 'cbk-arg'] - fop_subs[name] = get_subs(arg_names, arg_types, cbk_types) + # Create the necessary substitution strings for fops. + arg_names = [a[1] for a in args if a[0] == 'fop-arg'] + arg_types = [a[2] for a in args if a[0] == 'fop-arg'] + cbk_types = [a[2] for a in args if a[0] == 'cbk-arg'] + fop_subs[name] = get_subs(arg_names, arg_types, cbk_types) - # Same thing for callbacks. - arg_names = [ a[1] for a in args if a[0] == 'cbk-arg'] - arg_types = [ a[2] for a in args if a[0] == 'cbk-arg'] - cbk_subs[name] = get_subs(arg_names, arg_types) + # Same thing for callbacks. + arg_names = [a[1] for a in args if a[0] == 'cbk-arg'] + arg_types = [a[2] for a in args if a[0] == 'cbk-arg'] + cbk_subs[name] = get_subs(arg_names, arg_types) - # Callers can add other subs to these tables, or even create their - # own tables, using these same techniques, and then pass the result - # to generate() which would Do The Right Thing with them. + # Callers can add other subs to these tables, or even create their + # own tables, using these same techniques, and then pass the result + # to generate() which would Do The Right Thing with them. 
From 1e5d11b37a229731c6db837b73dde04c7dbe14d0 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 18:52:26 -0300 Subject: [PATCH 26/72] python linter: clean libglusterfs/src/gen-defaults.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # libglusterfs/src/gen-defaults.py | 80 +++++++++++++++++++++------------------- # 1 file changed, 42 insertions(+), 38 deletions(-) # libglusterfs/src/gen-defaults.py | 80 +++++++++++++++++++++------------------- # 1 file changed, 42 insertions(+), 38 deletions(-) --- libglusterfs/src/gen-defaults.py | 80 +++++++++++++++++--------------- 1 file changed, 42 insertions(+), 38 deletions(-) diff --git a/libglusterfs/src/gen-defaults.py b/libglusterfs/src/gen-defaults.py index e31d3a9fe8a..1fae17a4b20 100755 --- a/libglusterfs/src/gen-defaults.py +++ b/libglusterfs/src/gen-defaults.py @@ -1,37 +1,39 @@ #!/usr/bin/python3 from __future__ import print_function + import sys + from generator import ops, fop_subs, cbk_subs, generate FAILURE_CBK_TEMPLATE = """ int32_t default_@NAME@_failure_cbk (call_frame_t *frame, int32_t op_errno) { - STACK_UNWIND_STRICT (@NAME@, frame, -1, op_errno, @ERROR_ARGS@); - return 0; + STACK_UNWIND_STRICT (@NAME@, frame, -1, op_errno, @ERROR_ARGS@); + return 0; } """ CBK_RESUME_TEMPLATE = """ int32_t default_@NAME@_cbk_resume (call_frame_t *frame, void *cookie, xlator_t *this, - int32_t op_ret, int32_t op_errno, @LONG_ARGS@) + int32_t op_ret, int32_t op_errno, @LONG_ARGS@) { - STACK_UNWIND_STRICT (@NAME@, frame, op_ret, op_errno, - @SHORT_ARGS@); - return 0; + STACK_UNWIND_STRICT (@NAME@, frame, op_ret, op_errno, + @SHORT_ARGS@); + return 0; } """ CBK_TEMPLATE = """ 
int32_t default_@NAME@_cbk (call_frame_t *frame, void *cookie, xlator_t *this, - int32_t op_ret, int32_t op_errno, @LONG_ARGS@) + int32_t op_ret, int32_t op_errno, @LONG_ARGS@) { - STACK_UNWIND_STRICT (@NAME@, frame, op_ret, op_errno, - @SHORT_ARGS@); - return 0; + STACK_UNWIND_STRICT (@NAME@, frame, op_ret, op_errno, + @SHORT_ARGS@); + return 0; } """ @@ -39,43 +41,45 @@ int32_t default_@NAME@_resume (call_frame_t *frame, xlator_t *this, @LONG_ARGS@) { - STACK_WIND (frame, default_@NAME@_cbk, - FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@, - @SHORT_ARGS@); - return 0; + STACK_WIND (frame, default_@NAME@_cbk, + FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@, + @SHORT_ARGS@); + return 0; } """ FOP_TEMPLATE = """ int32_t default_@NAME@ ( - call_frame_t *frame, - xlator_t *this, - @LONG_ARGS@) + call_frame_t *frame, + xlator_t *this, + @LONG_ARGS@) { - STACK_WIND_TAIL (frame, - FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@, - @SHORT_ARGS@); - return 0; + STACK_WIND_TAIL (frame, + FIRST_CHILD(this), FIRST_CHILD(this)->fops->@NAME@, + @SHORT_ARGS@); + return 0; } """ -def gen_defaults (): - for name in list(ops.keys()): - print(generate(FAILURE_CBK_TEMPLATE, name, cbk_subs)) - for name in list(ops.keys()): - print(generate(CBK_RESUME_TEMPLATE, name, cbk_subs)) - for name in list(ops.keys()): - print(generate(CBK_TEMPLATE, name, cbk_subs)) - for name in list(ops.keys()): - print(generate(RESUME_TEMPLATE, name, fop_subs)) - for name in list(ops.keys()): - print(generate(FOP_TEMPLATE, name, fop_subs)) + +def gen_defaults(): + for name in list(ops.keys()): + print(generate(FAILURE_CBK_TEMPLATE, name, cbk_subs)) + for name in list(ops.keys()): + print(generate(CBK_RESUME_TEMPLATE, name, cbk_subs)) + for name in list(ops.keys()): + print(generate(CBK_TEMPLATE, name, cbk_subs)) + for name in list(ops.keys()): + print(generate(RESUME_TEMPLATE, name, fop_subs)) + for name in list(ops.keys()): + print(generate(FOP_TEMPLATE, name, fop_subs)) + for l in 
open(sys.argv[1], 'r').readlines(): - if l.find('#pragma generate') != -1: - print("/* BEGIN GENERATED CODE - DO NOT MODIFY */") - gen_defaults() - print("/* END GENERATED CODE */") - else: - print(l[:-1]) + if l.find('#pragma generate') != -1: + print("/* BEGIN GENERATED CODE - DO NOT MODIFY */") + gen_defaults() + print("/* END GENERATED CODE */") + else: + print(l[:-1]) From 1a1c33d6dbc37ccd362c5de0c79d1d2130335281 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:42:04 -0300 Subject: [PATCH 27/72] python linter: clean geo-replication/syncdaemon/syncdutils.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/syncdutils.py | 147 ++++++++++++++++--------------- # 1 file changed, 78 insertions(+), 69 deletions(-) # geo-replication/syncdaemon/syncdutils.py | 147 ++++++++++++++++--------------- # 1 file changed, 78 insertions(+), 69 deletions(-) --- geo-replication/syncdaemon/syncdutils.py | 147 ++++++++++++----------- 1 file changed, 78 insertions(+), 69 deletions(-) diff --git a/geo-replication/syncdaemon/syncdutils.py b/geo-replication/syncdaemon/syncdutils.py index 035bd035975..ea007cd0780 100644 --- a/geo-replication/syncdaemon/syncdutils.py +++ b/geo-replication/syncdaemon/syncdutils.py @@ -8,17 +8,17 @@ # cases as published by the Free Software Foundation. 
# +import errno +import fcntl +import logging import os -import sys import pwd -import time -import fcntl import shutil -import logging -import errno -import threading -import subprocess import socket +import subprocess +import sys +import threading +import time from subprocess import PIPE from threading import Lock, Thread as baseThread from errno import (EACCES, EAGAIN, EPIPE, ENOTCONN, ENOMEM, ECONNABORTED, @@ -28,14 +28,17 @@ from os import waitpid as owaitpid import xml.etree.ElementTree as XET from select import error as SelectError + try: from cPickle import PickleError except ImportError: from pickle import PickleError from conf import GLUSTERFS_LIBEXECDIR, UUID_FILE + sys.path.insert(1, GLUSTERFS_LIBEXECDIR) EVENTS_ENABLED = True + try: from gfevents.eventtypes import GEOREP_FAULTY as EVENT_GEOREP_FAULTY from gfevents.eventtypes import GEOREP_ACTIVE as EVENT_GEOREP_ACTIVE @@ -52,7 +55,6 @@ import gsyncdconfig as gconf from rconf import rconf - from hashlib import sha256 as sha256 ENOTSUP = getattr(errno, 'ENOTSUP', 'EOPNOTSUPP') @@ -74,18 +76,22 @@ final_lock = Lock() + def sup(x, *a, **kw): - """a rubyesque "super" for python ;) + """Invoke caller method in parent class with given args. - invoke caller method in parent class with given args. + a rubyesque "super" for python ;) """ return getattr(super(type(x), x), sys._getframe(1).f_code.co_name)(*a, **kw) def escape(s): - """the chosen flavor of string escaping, used all over - to turn whatever data to creatable representation""" + """Escape data to create representation. 
+ + the chosen flavor of string escaping, used all over + to turn whatever data to creatable representation + """ return s.replace("/", "-").strip("-") @@ -100,6 +106,7 @@ def unescape_space_newline(s): .replace(NEWLINE_ESCAPE_CHAR, "\n")\ .replace(PERCENTAGE_ESCAPE_CHAR, "%") + # gf_mount_ready() returns 1 if all subvols are up, else 0 def gf_mount_ready(): ret = errno_wrap(Xattr.lgetxattr, @@ -107,21 +114,21 @@ def gf_mount_ready(): [ENOENT, ENOTSUP, ENODATA], [ENOMEM]) if isinstance(ret, int): - logging.error("failed to get the xattr value") - return 1 + logging.error("failed to get the xattr value") + return 1 ret = ret.rstrip('\x00') if ret == "1": - return 1 + return 1 return 0 + def norm(s): if s: return s.replace('-', '_') def update_file(path, updater, merger=lambda f: True): - """update a file in a transaction-like manner""" - + """Update a file in a transaction-like manner.""" fr = fw = None try: fd = os.open(path, os.O_CREAT | os.O_RDWR) @@ -151,9 +158,7 @@ def update_file(path, updater, merger=lambda f: True): def create_manifest(fname, content): - """ - Create manifest file for SSH Control Path - """ + """Create manifest file for SSH Control Path.""" fd = None try: fd = os.open(fname, os.O_CREAT | os.O_RDWR) @@ -168,12 +173,10 @@ def create_manifest(fname, content): def setup_ssh_ctl(ctld, remote_addr, resource_url): - """ - Setup GConf ssh control path parameters - """ + """Set the GConf ssh control path parameters.""" rconf.ssh_ctl_dir = ctld content = "SECONDARY_HOST=%s\nSECONDARY_RESOURCE_URL=%s" % (remote_addr, - resource_url) + resource_url) encoded_content = content.encode() content_sha256 = sha256hex(encoded_content) """ @@ -194,7 +197,7 @@ def setup_ssh_ctl(ctld, remote_addr, resource_url): def grabfile(fname, content=None): - """open @fname + contest for its fcntl lock + """Open @fname + contest for its fcntl lock. 
@content: if given, set the file content to it """ @@ -223,7 +226,7 @@ def grabfile(fname, content=None): def grabpidfile(fname=None, setpid=True): - """.grabfile customization for pid files""" + """.grabfile customization for pid files.""" if not fname: fname = gconf.get("pid-file") content = None @@ -233,11 +236,10 @@ def grabpidfile(fname=None, setpid=True): def finalize(*args, **kwargs): - """all those messy final steps we go trough upon termination + """Do away with pidfile, ssh control dir and logging. - Do away with pidfile, ssh control dir and logging. + all those messy final steps we go trough upon termination """ - final_lock.acquire() if gconf.get('pid_file'): rm_pidf = rconf.pid_file_owned @@ -296,13 +298,14 @@ def handle_rm_error(func, path, exc_info): def log_raise_exception(excont): - """top-level exception handler + """Log and raise exception. + + top-level exception handler Try to some fancy things to cover up we face with an error. Translate some weird sounding but well understood exceptions into human-friendly lingo """ - is_filelog = False for h in logging.getLogger().handlers: fno = getattr(getattr(h, 'stream', None), 'fileno', None) @@ -367,7 +370,6 @@ def log_raise_exception(excont): class FreeObject(object): - """wildcard class for which any attribute can be set""" def __init__(self, **kw): @@ -376,7 +378,6 @@ def __init__(self, **kw): class Thread(baseThread): - """thread class flavor for gsyncd - always a daemon thread @@ -406,9 +407,7 @@ class GsyncdError(Exception): class _MetaXattr(object): - - """singleton class, a lazy wrapper around the - libcxattr module + """singleton class, a lazy wrapper around the libcxattr module. libcxattr (a heavy import due to ctypes) is loaded only when when the single @@ -443,7 +442,8 @@ def privileged(): def boolify(s): - """ + """Convert string to boolean. 
+ Generic string to boolean converter return @@ -471,10 +471,7 @@ def boolify(s): def eintr_wrap(func, exc, *args): - """ - wrapper around syscalls resilient to interrupt caused - by signals - """ + """Wrap around syscalls resilient to interrupt caused by signals.""" while True: try: return func(*args) @@ -563,8 +560,7 @@ def selfkill(sig=SIGTERM): def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]): - """ wrapper around calls resilient to errnos. - """ + """Wrap around calls resilient to errnos.""" nr_tries = 0 while True: try: @@ -588,6 +584,7 @@ def errno_wrap(call, arg=[], errnos=[], retry_errnos=[]): def lstat(e): return errno_wrap(os.lstat, [e], [ENOENT], [ESTALE, EBUSY]) + def get_gfid_from_mnt(gfidpath): return errno_wrap(Xattr.lgetxattr, [gfidpath, 'glusterfs.gfid.string', @@ -628,16 +625,16 @@ def gf_event(event_type, **kwargs): class GlusterLogLevel(object): - NONE = 0 - EMERG = 1 - ALERT = 2 - CRITICAL = 3 - ERROR = 4 - WARNING = 5 - NOTICE = 6 - INFO = 7 - DEBUG = 8 - TRACE = 9 + NONE = 0 + EMERG = 1 + ALERT = 2 + CRITICAL = 3 + ERROR = 4 + WARNING = 5 + NOTICE = 6 + INFO = 7 + DEBUG = 8 + TRACE = 9 def get_changelog_log_level(lvl): @@ -655,6 +652,7 @@ def get_primary_and_secondary_data_from_args(args): return (primary_name, secondary_data) + def unshare_propagation_supported(): global unshare_mnt_propagation if unshare_mnt_propagation is not None: @@ -721,7 +719,8 @@ def get_slv_dir_path(slv_host, slv_volume, gfid): # .gfid/GFID gfidpath = unescape_space_newline(os.path.join(pfx, gfid)) realpath = errno_wrap(Xattr.lgetxattr_buf, - [gfidpath, 'glusterfs.gfid2path'], [ENOENT], [ESTALE]) + [gfidpath, 'glusterfs.gfid2path'], + [ENOENT], [ESTALE]) if not isinstance(realpath, int): basename = os.path.basename(realpath).rstrip('\x00') dirpath = os.path.dirname(realpath) @@ -739,9 +738,11 @@ def get_slv_dir_path(slv_host, slv_volume, gfid): def lf(event, **kwargs): - """ + """Format event log entries. 
+ Log Format helper function, log messages can be easily modified to structured log format. + lf("Config Change", sync_jobs=4, brick=/bricks/b1) will be converted as "Config Change [{brick=/bricks/b1}, {sync_jobs=4}]" """ @@ -752,13 +753,15 @@ def lf(event, **kwargs): class Popen(subprocess.Popen): + """class Popen. - """customized subclass of subprocess.Popen with a ring - buffer for children error output""" + A customized subclass of subprocess.Popen + with a ring buffer for children error output. + """ @classmethod def init_errhandler(cls): - """start the thread which handles children's error output""" + """Start the thread which handles children's error output.""" cls.errstore = {} def tailer(): @@ -808,14 +811,14 @@ def tailer(): @classmethod def fork(cls): - """fork wrapper that restarts errhandler thread in child""" + """Fork wrapper that restarts errhandler thread in child.""" pid = os.fork() if not pid: cls.init_errhandler() return pid def __init__(self, args, *a, **kw): - """customizations for subprocess.Popen instantiation + """Customizations for subprocess.Popen instantiation. 
- 'close_fds' is taken to be the default - if child's stderr is chosen to be managed, @@ -837,11 +840,11 @@ def __init__(self, args, *a, **kw): (args[0], errno.errorcode[ex.errno], os.strerror(ex.errno))) if kw.get('stderr') == subprocess.PIPE: - assert(getattr(self, 'errhandler', None)) + assert (getattr(self, 'errhandler', None)) self.errstore[self] = [] def errlog(self): - """make a log about child's failure event""" + """Make a log about child's failure event.""" logging.error(lf("command returned error", cmd=" ".join(self.args), error=self.returncode)) @@ -859,12 +862,14 @@ def logerr(l): logerr(lp) def errfail(self): - """fail nicely if child did not terminate with success""" + """Fail nicely if child did not terminate with success.""" self.errlog() finalize(exval=1) def terminate_geterr(self, fail_on_err=True): - """kill child, finalize stderr harvesting (unregister + """Terminate and get error. + + kill child, finalize stderr harvesting (unregister from errhandler, set up .elines), fail on error if asked for """ @@ -895,9 +900,13 @@ def terminate_geterr(self, fail_on_err=True): def host_brick_split(value): - """ + """Split host and brick. + IPv6 compatible way to split and get the host - and brick information. Example inputs: + and brick information. + + Example inputs: + node1.example.com:/exports/bricks/brick1/brick fe80::af0f:df82:844f:ef66%utun0:/exports/bricks/brick1/brick """ @@ -908,7 +917,6 @@ def host_brick_split(value): class Volinfo(object): - def __init__(self, vol, host='localhost', prelude=[], primary=True): if primary: gluster_cmd_dir = gconf.get("gluster-command-dir") @@ -1106,7 +1114,7 @@ def can_ssh(host, port=22): def get_up_nodes(hosts, port): - # List of hosts with Hostname/IP and UUID + """List of hosts with Hostname/IP and UUID.""" up_nodes = [] for h in hosts: if can_ssh(h[0], port): @@ -1116,7 +1124,8 @@ def get_up_nodes(hosts, port): def ssh_cipher_present(sshopts): - """ + """Check ssh cipher. 
+ Returns True if user has defined a custom cipher to use for the SSH connection under rysnc-ssh-options. """ From 24634eb2b37aaa8a6239b5cf5a62d047d5ef1a47 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:47:19 -0300 Subject: [PATCH 28/72] python linter: clean geo-replication/syncdaemon/rconf.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/rconf.py | 1 + # 1 file changed, 1 insertion(+) # geo-replication/syncdaemon/rconf.py | 1 + # 1 file changed, 1 insertion(+) --- geo-replication/syncdaemon/rconf.py | 1 + 1 file changed, 1 insertion(+) diff --git a/geo-replication/syncdaemon/rconf.py b/geo-replication/syncdaemon/rconf.py index ff716ee4d6d..8573c1db2f3 100644 --- a/geo-replication/syncdaemon/rconf.py +++ b/geo-replication/syncdaemon/rconf.py @@ -28,4 +28,5 @@ class RConf(object): mount_point = None mbr_umount_cmd = [] + rconf = RConf() From 618e9816ba51d7760baf7c5f1d5828e8d0078be8 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:47:06 -0300 Subject: [PATCH 29/72] python linter: clean geo-replication/syncdaemon/resource.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/resource.py | 102 ++++++++++++++++----------------- # 1 file changed, 50 insertions(+), 52 deletions(-) # geo-replication/syncdaemon/resource.py | 102 ++++++++++++++++----------------- # 1 file changed, 50 insertions(+), 52 deletions(-) --- geo-replication/syncdaemon/resource.py | 102 ++++++++++++------------- 1 file changed, 50 insertions(+), 52 deletions(-) diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py index 480c2ea0ba7..af5d7f99b23 100644 --- a/geo-replication/syncdaemon/resource.py +++ b/geo-replication/syncdaemon/resource.py @@ -54,9 +54,8 @@ class Server(object): - - """singleton implemening those filesystem access primitives - which are needed for geo-replication functionality + """Singleton implemening those filesystem access primitives + which are needed for geo-replication functionality. (Singleton in the sense it's a class which has only static and classmethods and is used directly, without instantiation.) @@ -87,12 +86,13 @@ def _fmt_symlink(cls, l1, l2): return "!II%dsI%ds%ds" % (GX_GFID_CANONICAL_LEN, l1 + 1, l2 + 1) def _pathguard(f): - """decorator method that checks + """Guard against unsafe path. + + decorator method that checks the path argument of the decorated functions to make sure it does not point out of the managed tree """ - fc = funcode(f) pi = list(fc.co_varnames).index('path') @@ -109,7 +109,7 @@ def ff(*args): @classmethod @_pathguard def entries(cls, path): - """directory entries in an array""" + """Directory entries in an array.""" # prevent symlinks being followed if not stat.S_ISDIR(os.lstat(path).st_mode): raise OSError(ENOTDIR, os.strerror(ENOTDIR)) @@ -157,7 +157,7 @@ def gfid(cls, path): @classmethod @_pathguard def purge(cls, path, entries=None): - """force-delete subtrees + """Force-delete subtrees. 
If @entries is not specified, delete the whole subtree under @path (including @@ -206,7 +206,7 @@ def purge(cls, path, entries=None): @classmethod @_pathguard def _create(cls, path, ctor): - """path creation backend routine""" + """Path creation backend routine.""" try: ctor(path) except OSError: @@ -229,7 +229,7 @@ def symlink(cls, lnk, path): @classmethod @_pathguard def xtime(cls, path, uuid): - """query xtime extended attribute + """Query xtime extended attribute. Return xtime of @path for @uuid as a pair of integers. "Normal" errors due to non-existent @path or extended attribute @@ -252,7 +252,7 @@ def xtime(cls, path, uuid): @classmethod @_pathguard def stime_mnt(cls, path, uuid): - """query xtime extended attribute + """Query xtime extended attribute. Return xtime of @path for @uuid as a pair of integers. "Normal" errors due to non-existent @path or extended attribute @@ -275,7 +275,7 @@ def stime_mnt(cls, path, uuid): @classmethod @_pathguard def stime(cls, path, uuid): - """query xtime extended attribute + """Query xtime extended attribute. Return xtime of @path for @uuid as a pair of integers. 
"Normal" errors due to non-existent @path or extended attribute @@ -331,7 +331,7 @@ def node_uuid(cls, path='.'): @classmethod @_pathguard def set_stime(cls, path, uuid, mark): - """set @mark as stime for @uuid on @path""" + """Set @mark as stime for @uuid on @path.""" errno_wrap(Xattr.lsetxattr, [path, '.'.join([cls.GX_NSPACE, uuid, 'stime']), @@ -342,7 +342,7 @@ def set_stime(cls, path, uuid, mark): @classmethod @_pathguard def set_entry_stime(cls, path, uuid, mark): - """set @mark as stime for @uuid on @path""" + """Set @mark as stime for @uuid on @path.""" errno_wrap(Xattr.lsetxattr, [path, '.'.join([cls.GX_NSPACE, uuid, 'entry_stime']), @@ -353,7 +353,7 @@ def set_entry_stime(cls, path, uuid, mark): @classmethod @_pathguard def set_xtime(cls, path, uuid, mark): - """set @mark as xtime for @uuid on @path""" + """Set @mark as xtime for @uuid on @path.""" errno_wrap(Xattr.lsetxattr, [path, '.'.join([cls.GX_NSPACE, uuid, 'xtime']), @@ -364,8 +364,8 @@ def set_xtime(cls, path, uuid, mark): @classmethod @_pathguard def set_xtime_remote(cls, path, uuid, mark): - """ - set @mark as xtime for @uuid on @path + """Set @mark as xtime for @uuid on @path. + the difference b/w this and set_xtime() being set_xtime() being overloaded to set the xtime on the brick (this method sets xtime on the @@ -459,7 +459,9 @@ def collect_failure(e, cmd_ret, uid, gid, dst=False): failures = [] def recursive_rmdir(gfid, entry, path): - """disk_gfid check added for original path for which + """Delete directories recursively. + + disk_gfid check added for original path for which recursive_delete is called. This disk gfid check executed before every Unlink/Rmdir. 
If disk gfid is not matching with GFID from Changelog, that means other worker @@ -657,8 +659,10 @@ def rename_with_disk_gfid_confirmation(gfid, entry, en, uid, gid): blob = entry_pack_reg_stat(cls, gfid, bname, e['stat']) else: - cmd_ret = errno_wrap(os.link, [slink, en], - [ENOENT, EEXIST], [ESTALE]) + cmd_ret = errno_wrap(os.link, + [slink, en], + [ENOENT, EEXIST], + [ESTALE]) collect_failure(e, cmd_ret, uid, gid) else: st = lstat(entry) @@ -767,7 +771,7 @@ def meta_ops(cls, meta_entries): @classmethod @_pathguard def setattr(cls, path, adct): - """set file attributes + """Set file attributes. @adct is a dict, where 'own', 'mode' and 'times' keys are looked for and values used to perform @@ -791,7 +795,7 @@ def pid(): @classmethod def keep_alive(cls, dct): - """process keepalive messages. + """Process keepalive messages. Return keep-alive counter (number of received keep-alive messages). @@ -814,12 +818,11 @@ def keep_alive(cls, dct): @staticmethod def version(): - """version used in handshake""" + """Version used in handshake.""" return 1.0 class Mounter(object): - """Abstract base class for mounter backends""" def __init__(self, params): @@ -855,7 +858,7 @@ def handle_mounter(self, po): po.wait() def inhibit(self, label): - """inhibit a gluster filesystem + """Inhibit a gluster filesystem. Mount glusterfs over a temporary mountpoint, change into the mount, and lazy unmount the @@ -923,11 +926,11 @@ def inhibit(self, label): mounted = False if mntdata[-1] == 'M': mntdata = mntdata[:-1] - assert(mntdata) + assert (mntdata) mounted = True - assert(mntdata[-1] == '\0') + assert (mntdata[-1] == '\0') mntpt = mntdata[:-1] - assert(mntpt) + assert (mntpt) umount_primary = False umount_secondary = False @@ -953,7 +956,7 @@ def inhibit(self, label): rv = 200 os._exit(rv) - #Polling the dht.subvol.status value. + # Polling the dht.subvol.status value. 
RETRIES = 10 while not gf_mount_ready(): if RETRIES < 0: @@ -966,8 +969,7 @@ def inhibit(self, label): class DirectMounter(Mounter): - - """mounter backend which calls mount(8), umount(8) directly""" + """Mounter backend which calls mount(8), umount(8) directly.""" mountkw = {'stderr': subprocess.PIPE, 'universal_newlines': True} glusterprog = 'glusterfs' @@ -989,8 +991,7 @@ def cleanup_mntpt(self, mntpt=None): class MountbrokerMounter(Mounter): - - """mounter backend using the mountbroker gluster service""" + """Mounter backend using the mountbroker gluster service.""" mountkw = {'stderr': subprocess.PIPE, 'stdout': subprocess.PIPE, 'universal_newlines': True} @@ -1026,12 +1027,11 @@ def handle_mounter(self, po): class GLUSTERServer(Server): - - "server enhancements for a glusterfs backend""" + """Server enhancements for a glusterfs backend.""" @classmethod def _attr_unpack_dict(cls, xattr, extra_fields=''): - """generic volume mark fetching/parsing backed""" + """Generic volume mark fetching/parsing backend.""" fmt_string = cls.NTV_FMTSTR + extra_fields buf = Xattr.lgetxattr('.', xattr, struct.calcsize(fmt_string)) buf = str_to_bytearray(buf) @@ -1052,7 +1052,7 @@ def _attr_unpack_dict(cls, xattr, extra_fields=''): @classmethod def foreign_volume_infos(cls): - """return list of valid (not expired) foreign volume marks""" + """Return list of valid (not expired) foreign volume marks.""" dict_list = [] xattr_list = Xattr.llistxattr_buf('.') for ele in xattr_list: @@ -1074,7 +1074,7 @@ def foreign_volume_infos(cls): @classmethod def native_volume_info(cls): - """get the native volume mark of the underlying gluster volume""" + """Get the native volume mark of the underlying gluster volume.""" try: return cls._attr_unpack_dict('.'.join([cls.GX_NSPACE, 'volume-mark'])) @@ -1085,8 +1085,7 @@ def native_volume_info(cls): class GLUSTER(object): - - """scheme class for gluster:// urls + """Scheme class for gluster:// urls. 
can be used to represent a gluster secondary server on secondary side, or interface to a remote gluster @@ -1094,6 +1093,7 @@ class GLUSTER(object): (secondary-ish features come from the mixins, primary functionality is outsourced to GPrimary from primary) """ + server = GLUSTERServer def __init__(self, host, volume): @@ -1107,13 +1107,12 @@ def __init__(self, host, volume): slv_host = self.host def connect(self): - """inhibit the resource beyond + """Inhibit the resource beyond. Choose mounting backend (direct or mountbroker), set up glusterfs parameters and perform the mount with given backend """ - logging.info("Mounting gluster volume locally...") t0 = time.time() label = gconf.get('mountbroker', None) @@ -1140,14 +1139,13 @@ def connect(self): duration="%.4f" % (time.time() - t0))) def gprimary_instantiate_tuple(self, secondary): - """return a tuple of the 'one shot' and the 'main crawl' - class instance""" + """Return a tuple of the 'one shot' and the 'main crawl' class instance.""" return (gprimary_builder('xsync')(self, secondary), gprimary_builder()(self, secondary), gprimary_builder('changeloghistory')(self, secondary)) def service_loop(self, secondary=None): - """enter service loop + """Enter service loop. - if secondary given, instantiate GPrimary and pass control to that instance, which implements @@ -1197,12 +1195,12 @@ def entries(cls, path): @classmethod def lstat(cls, e): - """ path based backend stat """ + """Path based backend stat.""" return super(brickserver, cls).lstat(e) @classmethod def gfid(cls, e): - """ path based backend gfid fetch """ + """Path based backend gfid fetch.""" return super(brickserver, cls).gfid(e) @classmethod @@ -1321,8 +1319,7 @@ def linkto_check(cls, e): class SSH(object): - - """scheme class for ssh:// urls + """Scheme class for ssh:// urls. 
interface to remote secondary on primary side implementing an ssh based proxy @@ -1343,7 +1340,7 @@ def parse_ssh_address(self): return {'user': u, 'host': h} def start_fd_client(self, i, o): - """set up RePCe client, handshake with server + """Set up RePCe client, handshake with server. It's cut out as a separate method to let subclasses hook into client startup @@ -1364,7 +1361,7 @@ def start_fd_client(self, i, o): self.secondaryurl = ':'.join([self.remote_addr, secondarypath]) def connect_remote(self): - """connect to inner secondary url through outer ssh url + """Connect to inner secondary url through outer ssh url. Wrap the connecting utility in ssh. @@ -1437,7 +1434,7 @@ def connect_remote(self): duration="%.4f" % (time.time() - t0))) def rsync(self, files, *args, **kw): - """invoke rsync""" + """Invoke rsync.""" if not files: raise GsyncdError("no files to sync") logging.debug("files: " + ", ".join(files)) @@ -1536,7 +1533,8 @@ def rsync(self, files, *args, **kw): return po def tarssh(self, files, log_err=False): - """invoke tar+ssh + """Invoke tar+ssh. + -z (compress) can be use if needed, but omitting it now as it results in weird error (tar+ssh errors out (errcode: 2) """ From b9f2831a5ff3be271d6d18c3ce2d300423ae5301 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:22:23 -0300 Subject: [PATCH 30/72] python linter: clean geo-replication/syncdaemon/primary.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/primary.py | 179 ++++++++++++++++------------------ # 1 file changed, 83 insertions(+), 96 deletions(-) # geo-replication/syncdaemon/primary.py | 179 ++++++++++++++++------------------ # 1 file changed, 83 insertions(+), 96 deletions(-) --- geo-replication/syncdaemon/primary.py | 179 ++++++++++++-------------- 1 file changed, 83 insertions(+), 96 deletions(-) diff --git a/geo-replication/syncdaemon/primary.py b/geo-replication/syncdaemon/primary.py index 8802924619b..b7069e56bda 100644 --- a/geo-replication/syncdaemon/primary.py +++ b/geo-replication/syncdaemon/primary.py @@ -8,18 +8,18 @@ # cases as published by the Free Software Foundation. # +import errno +import fcntl +import logging import os -import sys -import time import stat -import logging -import fcntl import string -import errno +import sys import tarfile -from errno import ENOENT, ENODATA, EEXIST, EACCES, EAGAIN, ESTALE, EINTR -from threading import Condition, Lock +import time +from errno import EACCES, EAGAIN, EEXIST, EINTR, ENODATA, ENOENT, ESTALE from datetime import datetime +from threading import Condition, Lock import gsyncdconfig as gconf import libgfchangelog @@ -88,8 +88,7 @@ def edct(op, **ed): # The API! 
def gprimary_builder(excrawl=None): - """produce the GPrimary class variant corresponding - to sync mode""" + """Produce the GPrimary class variant corresponding to sync mode.""" this = sys.modules[__name__] modemixin = gconf.get("special-sync-mode") if not modemixin: @@ -123,7 +122,7 @@ def gprimary_builder(excrawl=None): syncengine = RsyncEngine class _GPrimary(crawlmixin, modemixin, sendmarkmixin, - purgemixin, syncengine): + purgemixin, syncengine): pass return _GPrimary @@ -134,8 +133,7 @@ class _GPrimary(crawlmixin, modemixin, sendmarkmixin, # sync modes class NormalMixin(object): - - """normal geo-rep behavior""" + """Normal geo-rep behavior.""" minus_infinity = URXTIME @@ -220,7 +218,6 @@ def set_secondary_xtime(self, path, mark): class PartialMixin(NormalMixin): - """a variant tuned towards operation with a primary that has partial info of the secondary (brick typically)""" @@ -229,7 +226,6 @@ def xtime_reversion_hook(self, path, xtl, xtr): class RecoverMixin(NormalMixin): - """a variant that differs from normal in terms of ignoring non-indexed files""" @@ -274,9 +270,10 @@ def purge_missing(self, path, names): class TarSSHEngine(object): + """class TarSSHEngine. - """Sync engine that uses tar(1) piped over ssh(1) - for data transfers. Good for lots of small files. + Sync engine that uses tar(1) piped over ssh(1) + for data transfers. Good for lots of small files. 
""" def a_syncdata(self, files): @@ -310,8 +307,7 @@ def syncdata(self, files): class RsyncEngine(object): - - """Sync engine that uses rsync(1) for data transfers""" + """Sync engine that uses rsync(1) for data transfers.""" def a_syncdata(self, files): logging.debug(lf("files", files=files)) @@ -345,8 +341,7 @@ def syncdata(self, files): class GPrimaryCommon(object): - - """abstract class impementling primary role""" + """Abstract class impementling primary role.""" KFGN = 0 KNAT = 1 @@ -389,7 +384,7 @@ def get_data_stime(self): return data def xtime(self, path, *a, **opts): - """get amended xtime + """Get amended xtime. as of amending, we can create missing xtime, or determine a valid value if what we get is expired @@ -436,7 +431,7 @@ def __init__(self, primary, secondary): self.unlinked_gfids = set() def init_keep_alive(cls): - """start the keep-alive thread """ + """Start the keep-alive thread.""" timo = gconf.get("secondary-timeout", 0) if timo > 0: def keep_alive(): @@ -448,8 +443,7 @@ def keep_alive(): t.start() def mgmt_lock(self): - - """Take management volume lock """ + """Take management volume lock.""" if rconf.mgmt_lock_fd: try: fcntl.lockf(rconf.mgmt_lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB) @@ -609,7 +603,7 @@ def crawlwrap(self, oneshot=False, register_time=None): @staticmethod def humantime(*tpair): - """format xtime-like (sec, nsec) pair to human readable format""" + """Format xtime-like (sec, nsec) pair to human readable format.""" ts = datetime.fromtimestamp(float('.'.join(str(n) for n in tpair))).\ strftime("%Y-%m-%d %H:%M:%S") if len(tpair) > 1: @@ -636,18 +630,18 @@ def _crawl_time_format(self, crawl_time): return date def add_job(self, path, label, job, *a, **kw): - """insert @job function to job table at @path with @label""" + """Insert @job function to job table at @path with @label.""" if self.jobtab.get(path) is None: self.jobtab[path] = [] self.jobtab[path].append((label, a, lambda: job(*a, **kw))) def add_failjob(self, path, label): - 
"""invoke .add_job with a job that does nothing just fails""" + """Invoke .add_job with a job that does nothing just fails.""" logging.debug('salvaged: ' + label) self.add_job(path, label, lambda: False) def wait(self, path, *args): - """perform jobs registered for @path + """Perform jobs registered for @path. Reset jobtab entry for @path, determine success as the conjunction of @@ -665,7 +659,7 @@ def wait(self, path, *args): return succeed def sendmark(self, path, mark, adct=None): - """update secondary side xtime for @path to primary side xtime + """Update secondary side xtime for @path to primary side xtime. also can send a setattr payload (see Server.setattr). """ @@ -684,8 +678,7 @@ def __init__(self, st_uid, st_gid, st_mode, st_atime, st_mtime): class GPrimaryChangelogMixin(GPrimaryCommon): - - """ changelog based change detection and syncing """ + """Changelog based change detection and syncing.""" # index for change type and entry IDX_START = 0 @@ -814,9 +807,9 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): # Takes care of scenarios with no hardlinks if isinstance(st, int) and st == ENOENT: logging.debug(lf('Entry not present on primary. Fixing gfid ' - 'mismatch in secondary. Deleting the entry', - retry_count=retry_count, - entry=repr(failure))) + 'mismatch in secondary. Deleting the entry', + retry_count=retry_count, + entry=repr(failure))) # Add deletion to fix_entry_ops list if failure[2]['secondary_isdir']: fix_entry_ops.append( @@ -846,9 +839,9 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): # Safe to ignore the failure as primary contains same # file with same gfid. Remove entry from entries list logging.debug(lf('Fixing gfid mismatch in secondary. 
' - ' Safe to ignore, take out entry', - retry_count=retry_count, - entry=repr(failure))) + ' Safe to ignore, take out entry', + retry_count=retry_count, + entry=repr(failure))) remove_gfids.add(failure[0]['gfid']) if op == 'RENAME': fix_entry_ops.append( @@ -868,17 +861,17 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): realpath.split('/')[-1]) src_entry = pbname logging.debug(lf('Fixing dir name/gfid mismatch in ' - 'secondary', retry_count=retry_count, - entry=repr(failure))) + 'secondary', retry_count=retry_count, + entry=repr(failure))) if src_entry == dst_entry: # Safe to ignore the failure as primary contains # same directory as in secondary with same gfid. # Remove the failure entry from entries list logging.debug(lf('Fixing dir name/gfid mismatch' - ' in secondary. Safe to ignore, ' - 'take out entry', - retry_count=retry_count, - entry=repr(failure))) + ' in secondary. Safe to ignore, ' + 'take out entry', + retry_count=retry_count, + entry=repr(failure))) try: entries.remove(failure[0]) except ValueError: @@ -889,9 +882,9 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): entry1=dst_entry, stat=st, link=None) logging.debug(lf('Fixing dir name/gfid mismatch' - ' in secondary. Renaming', - retry_count=retry_count, - entry=repr(rename_dict))) + ' in secondary. Renaming', + retry_count=retry_count, + entry=repr(rename_dict))) fix_entry_ops.append(rename_dict) else: # A hardlink file exists with different name or @@ -899,9 +892,9 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): # matching_disk_gfid check that the entry doesn't # exist with same gfid so we can safely delete on secondary logging.debug(lf('Fixing file gfid mismatch in secondary. ' - 'Hardlink/Rename Case. Deleting entry', - retry_count=retry_count, - entry=repr(failure))) + 'Hardlink/Rename Case. 
Deleting entry', + retry_count=retry_count, + entry=repr(failure))) fix_entry_ops.append( edct('UNLINK', gfid=failure[2]['secondary_gfid'], @@ -918,19 +911,19 @@ def fix_possible_entry_failures(self, failures, retry_count, entries): # parent directory. if isinstance(st, int): logging.debug(lf('Fixing ENOENT error in secondary. Parent ' - 'does not exist on primary. Safe to ' - 'ignore, take out entry', - retry_count=retry_count, - entry=repr(failure))) + 'does not exist on primary. Safe to ' + 'ignore, take out entry', + retry_count=retry_count, + entry=repr(failure))) try: entries.remove(failure[0]) except ValueError: pass else: logging.debug(lf('Fixing ENOENT error in secondary. Create ' - 'parent directory on secondary.', - retry_count=retry_count, - entry=repr(failure))) + 'parent directory on secondary.', + retry_count=retry_count, + entry=repr(failure))) realpath = os.readlink(os.path.join(rconf.args.local_path, ".glusterfs", pargfid[0:2], @@ -1086,24 +1079,24 @@ def process_change(self, change, done, retry): if ty in ['MKNOD']: mode = int(ec[2]) if mode & 0o1000: - # Avoid stat'ing the file as it - # may be deleted in the interim - st = FreeObject(st_mode=int(ec[2]), - st_uid=int(ec[3]), - st_gid=int(ec[4]), - st_atime=0, - st_mtime=0) - - # So, it may be deleted, but still we are - # append LINK? Because, the file will be - # CREATED if source not exists. - entries.append(edct('LINK', stat=st, entry=en, - gfid=gfid)) - - # Here, we have the assumption that only - # tier-gfid.linkto causes this mknod. Add data - datas.add(os.path.join(pfx, ec[0])) - continue + # Avoid stat'ing the file as it + # may be deleted in the interim + st = FreeObject(st_mode=int(ec[2]), + st_uid=int(ec[3]), + st_gid=int(ec[4]), + st_atime=0, + st_mtime=0) + + # So, it may be deleted, but still we are + # append LINK? Because, the file will be + # CREATED if source not exists. 
+ entries.append(edct('LINK', stat=st, entry=en, + gfid=gfid)) + + # Here, we have the assumption that only + # tier-gfid.linkto causes this mknod. Add data + datas.add(os.path.join(pfx, ec[0])) + continue # stat info. present in the changelog itself entries.append(edct(ty, gfid=gfid, entry=en, @@ -1244,7 +1237,7 @@ def process_change(self, change, done, retry): # don't need to keep iterating because we'll get the same # result in all other attempts. if ((num_entries == len(entries)) and - (num_failures == len(failures))): + (num_failures == len(failures))): logging.info(lf("No more gfid mismatches can be fixed", entries=num_entries, failures=num_failures)) @@ -1461,8 +1454,8 @@ def process(self, changes, done=1): def upd_entry_stime(self, stime): self.secondary.server.set_entry_stime(self.FLAT_DIR_HIERARCHY, - self.uuid, - stime) + self.uuid, + stime) def upd_stime(self, stime, path=None): if not path: @@ -1565,7 +1558,7 @@ def crawl(self): end_time = int(time.time()) - #as start of historical crawl marks Geo-rep worker restart + # as start of historical crawl marks Geo-rep worker restart if gconf.get("ignore-deletes"): logging.info(lf('ignore-deletes config option is set', stime=data_stime)) @@ -1639,7 +1632,6 @@ def crawl(self): class GPrimaryXsyncMixin(GPrimaryChangelogMixin): - """ This crawl needs to be xtime based (as of now it's not. this is because we generate CHANGELOG @@ -1677,10 +1669,8 @@ def register(self, register_time=None, status=None): if f.startswith("XSYNC-CHANGELOG"): os.remove(os.path.join(self.tempdir, f)) - def crawl(self): - """ - event dispatcher thread + """Event dispatcher thread. this thread dispatches either changelog or synchronizes stime. 
additionally terminates itself on receiving a 'finale' event @@ -1742,7 +1732,7 @@ def put(self, mark, item): self.comlist.append((mark, item)) def sync_xsync(self, last): - """schedule a processing of changelog""" + """Schedule a processing of changelog.""" self.close() if self.counter > 0: self.put('xsync', self.fname()) @@ -1751,7 +1741,7 @@ def sync_xsync(self, last): time.sleep(1) # make sure changelogs are 1 second apart def sync_stime(self, stime=None, last=False): - """schedule a stime synchronization""" + """Schedule a stime synchronization.""" if stime: self.put('stime', stime) if last: @@ -1775,8 +1765,7 @@ def is_sticky(self, path, mo): return sticky def Xcrawl(self, path='.', xtr_root=None): - """ - generate a CHANGELOG file consumable by process_change. + """Generate a CHANGELOG file consumable by process_change. secondary's xtime (stime) is _cached_ for comparisons across the filesystem tree, but set after directory synchronization. @@ -1906,8 +1895,7 @@ class BoxClosedErr(Exception): class PostBox(list): - - """synchronized collection for storing things thought of as "requests" """ + """Synchronized collection for storing things thought of as "requests".""" def __init__(self, *a): list.__init__(self, *a) @@ -1919,7 +1907,7 @@ def __init__(self, *a): self.done = False def wait(self): - """wait on requests to be processed""" + """Wait on requests to be processed.""" self.lever.acquire() if not self.done: self.lever.wait() @@ -1927,7 +1915,7 @@ def wait(self): return self.result def wakeup(self, data): - """wake up requestors with the result""" + """Wake up requestors with the result.""" self.result = data self.lever.acquire() self.done = True @@ -1935,7 +1923,7 @@ def wakeup(self, data): self.lever.release() def append(self, e): - """post a request""" + """Post a request.""" self.lever.acquire() if not self.open: raise BoxClosedErr @@ -1943,15 +1931,14 @@ def append(self, e): self.lever.release() def close(self): - """prohibit the posting of further 
requests""" + """Prohibit the posting of further requests.""" self.lever.acquire() self.open = False self.lever.release() class Syncer(object): - - """a staged queue to relay rsync requests to rsync workers + """A staged queue to relay rsync requests to rsync workers. By "staged queue" its meant that when a consumer comes to the queue, it takes _all_ entries, leaving the queue empty. @@ -1981,7 +1968,7 @@ class Syncer(object): """ def __init__(self, secondary, sync_engine, resilient_errnos=[]): - """spawn worker threads""" + """Spawn worker threads.""" self.log_err = False self.secondary = secondary self.lock = Lock() @@ -1993,7 +1980,7 @@ def __init__(self, secondary, sync_engine, resilient_errnos=[]): t.start() def syncjob(self, job_id): - """the life of a worker""" + """Live the life of a worker.""" while True: pb = None while True: From eea40ecb7d43b074de986f5e14a17c897bd7142b Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:42:44 -0300 Subject: [PATCH 31/72] python linter: clean geo-replication/syncdaemon/libcxattr.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/libcxattr.py | 8 +++++--- # 1 file changed, 5 insertions(+), 3 deletions(-) # geo-replication/syncdaemon/libcxattr.py | 8 +++++--- # 1 file changed, 5 insertions(+), 3 deletions(-) --- geo-replication/syncdaemon/libcxattr.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/geo-replication/syncdaemon/libcxattr.py b/geo-replication/syncdaemon/libcxattr.py index e6406c36bd7..1a4e2cb6e65 100644 --- a/geo-replication/syncdaemon/libcxattr.py +++ b/geo-replication/syncdaemon/libcxattr.py @@ -9,14 +9,16 @@ # import os + from ctypes import CDLL, get_errno from py2py3 import (bytearray_to_str, gr_create_string_buffer, gr_query_xattr, gr_lsetxattr, gr_lremovexattr) class Xattr(object): + """class Xattr. - """singleton that wraps the extended attributes system + singleton that wraps the extended attributes system interface for python using ctypes Just implement it to the degree we need it, in particular @@ -60,7 +62,7 @@ def lgetxattr(cls, path, attr, siz=0): @classmethod def lgetxattr_buf(cls, path, attr): - """lgetxattr variant with size discovery""" + """Use lgetxattr variant with size discovery""" size = cls.lgetxattr(path, attr) if size == -1: cls.raise_oserr() @@ -90,7 +92,7 @@ def lremovexattr(cls, path, attr): @classmethod def llistxattr_buf(cls, path): - """listxattr variant with size discovery""" + """Use listxattr variant with size discovery.""" try: # Assuming no more than 100 xattrs in a file/directory and # each xattr key length will be less than 256 bytes From 409aabf717bbda1d7c050cb366013dfc757e178a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:28:02 -0300 Subject: [PATCH 32/72] python linter: clean geo-replication/syncdaemon/gsyncdconfig.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. 
The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/gsyncdconfig.py | 23 ++++++++++++----------- # 1 file changed, 12 insertions(+), 11 deletions(-) # geo-replication/syncdaemon/gsyncdconfig.py | 23 ++++++++++++----------- # 1 file changed, 12 insertions(+), 11 deletions(-) --- geo-replication/syncdaemon/gsyncdconfig.py | 23 +++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/geo-replication/syncdaemon/gsyncdconfig.py b/geo-replication/syncdaemon/gsyncdconfig.py index aefc4568840..f1859270930 100644 --- a/geo-replication/syncdaemon/gsyncdconfig.py +++ b/geo-replication/syncdaemon/gsyncdconfig.py @@ -13,13 +13,13 @@ from ConfigParser import RawConfigParser, NoSectionError except ImportError: from configparser import RawConfigParser, NoSectionError + import os import shutil -from string import Template from datetime import datetime +from string import Template from threading import Lock - # Global object which can be used in other modules # once load_config is called _gconf = {} @@ -165,7 +165,6 @@ def check(self, name, value=None, with_conffile=True): if value is not None and not self._is_valid_value(name, value): raise GconfInvalidValue() - def _load_with_lock(self): with self.lock: self._load() @@ -337,13 +336,14 @@ def _is_valid_value(self, name, value): def _is_config_changed(self): if self.custom_conf_file is not None and \ os.path.exists(self.custom_conf_file): - st = os.lstat(self.custom_conf_file) - if st.st_mtime > self.prev_mtime: - self.prev_mtime = st.st_mtime - return True + st = os.lstat(self.custom_conf_file) + if st.st_mtime > self.prev_mtime: + self.prev_mtime = st.st_mtime + return True return False + def is_config_file_old(config_file, primaryvol, secondaryvol): cnf = RawConfigParser() cnf.read(config_file) @@ -353,18 
+353,19 @@ def is_config_file_old(config_file, primaryvol, secondaryvol): except NoSectionError: return None + def config_upgrade(config_file, ret): config_file_backup = os.path.join(os.path.dirname(config_file), "gsyncd.conf.bkp") - #copy old config file in a backup file + # copy old config file in a backup file shutil.copyfile(config_file, config_file_backup) - #write a new config file + # write a new config file config = RawConfigParser() config.add_section('vars') for key, value in ret.items(): - #handle option name changes + # handle option name changes if key == "use_tarssh": new_key = "sync-method" if value == "true": @@ -375,7 +376,7 @@ def config_upgrade(config_file, ret): elif key == "timeout": new_key = "secondary-timeout" config.set('vars', new_key, value) - #for changes like: ignore_deletes to ignore-deletes + # for changes like: ignore_deletes to ignore-deletes else: new_key = key.replace("_", "-") config.set('vars', new_key, value) From 48caa881a3af481bb2cac72db44e88160494acfb Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:27:21 -0300 Subject: [PATCH 33/72] python linter: clean geo-replication/syncdaemon/gsyncd.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/gsyncd.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) # geo-replication/syncdaemon/gsyncd.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) --- geo-replication/syncdaemon/gsyncd.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/geo-replication/syncdaemon/gsyncd.py b/geo-replication/syncdaemon/gsyncd.py index 136c43e8fef..c0faf94ed65 100644 --- a/geo-replication/syncdaemon/gsyncd.py +++ b/geo-replication/syncdaemon/gsyncd.py @@ -217,7 +217,7 @@ def main(): # If an subcmd accepts config file then it also accepts # primary and Secondary arguments. if config_file is None and hasattr(args, "config_file") \ - and args.subcmd != "secondary": + and args.subcmd != "secondary": config_file = "%s/geo-replication/%s_%s_%s/gsyncd.conf" % ( GLUSTERD_WORKDIR, args.primary, @@ -245,7 +245,7 @@ def main(): args.subcmd in ["monitor", "config-get", "config-set", "config-reset"]: ret = gconf.is_config_file_old(config_file, args.primary, extra_tmpl_args["secondaryvol"]) if ret is not None: - gconf.config_upgrade(config_file, ret) + gconf.config_upgrade(config_file, ret) # Load Config file gconf.load(GLUSTERFS_CONFDIR + "/gsyncd.conf", From 72c9d6bd86a40478fc088f252d1b458bd623720f Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:26:02 -0300 Subject: [PATCH 34/72] python linter: clean geo-replication/syncdaemon/__codecheck.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/__codecheck.py | 1 + # 1 file changed, 1 insertion(+) # geo-replication/syncdaemon/__codecheck.py | 1 + # 1 file changed, 1 insertion(+) --- geo-replication/syncdaemon/__codecheck.py | 1 + 1 file changed, 1 insertion(+) diff --git a/geo-replication/syncdaemon/__codecheck.py b/geo-replication/syncdaemon/__codecheck.py index 9437147f7d9..08fc0f0446e 100644 --- a/geo-replication/syncdaemon/__codecheck.py +++ b/geo-replication/syncdaemon/__codecheck.py @@ -9,6 +9,7 @@ # from __future__ import print_function + import os import os.path import sys From ea48099554dbdfc22b154a721d27ebcafa75c5fb Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:04:58 -0300 Subject: [PATCH 35/72] python linter: clean geo-replication/src/peer_georep-sshkey.py.in This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/src/peer_georep-sshkey.py.in | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # geo-replication/src/peer_georep-sshkey.py.in | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- geo-replication/src/peer_georep-sshkey.py.in | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geo-replication/src/peer_georep-sshkey.py.in b/geo-replication/src/peer_georep-sshkey.py.in index 58696e9a616..80ade1f72a0 100644 --- a/geo-replication/src/peer_georep-sshkey.py.in +++ b/geo-replication/src/peer_georep-sshkey.py.in @@ -109,7 +109,7 @@ class GenCmd(Cmd): with open(COMMON_SECRET_FILE, "w") as f: f.write("\n".join(common_secrets) + "\n") - print (table) + print(table) if __name__ == "__main__": From 2ea662b6f7efb6d10df40041a8bf31d8294b85ce Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:05:00 -0300 Subject: [PATCH 36/72] python linter: clean geo-replication/src/peer_mountbroker.py.in This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/src/peer_mountbroker.py.in | 3 ++- # 1 file changed, 2 insertions(+), 1 deletion(-) # geo-replication/src/peer_mountbroker.py.in | 3 ++- # 1 file changed, 2 insertions(+), 1 deletion(-) --- geo-replication/src/peer_mountbroker.py.in | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/geo-replication/src/peer_mountbroker.py.in b/geo-replication/src/peer_mountbroker.py.in index 3228293acf6..da096921ed4 100644 --- a/geo-replication/src/peer_mountbroker.py.in +++ b/geo-replication/src/peer_mountbroker.py.in @@ -125,7 +125,7 @@ class MountbrokerUserMgmt(object): pass def info(self): - # Convert Volumes set into Volumes list + "Convert Volumes set into Volumes list." users = {} for k, v in self.user_volumes.items(): users[k] = list(v) @@ -397,5 +397,6 @@ class CliRemove(Cmd): print(table) + if __name__ == "__main__": runcli() From 7427f0458f16ff13c59ef9efeaea30b712630a27 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:23:51 -0300 Subject: [PATCH 37/72] python linter: clean geo-replication/setup.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/setup.py | 3 ++- # 1 file changed, 2 insertions(+), 1 deletion(-) # geo-replication/setup.py | 3 ++- # 1 file changed, 2 insertions(+), 1 deletion(-) --- geo-replication/setup.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/geo-replication/setup.py b/geo-replication/setup.py index 0eae469d2d6..a486230f0d0 100644 --- a/geo-replication/setup.py +++ b/geo-replication/setup.py @@ -8,7 +8,8 @@ # cases as published by the Free Software Foundation. 
# -""" +"""Geo-replication setup. + This setup.py only used to run tests, since geo-replication will be installed in /usr/local/libexec/glusterfs or /usr/libexec/glusterfs """ From e7226c238c2b35fcf93ddcc35b5c7f97cbbed6a0 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:48:20 -0300 Subject: [PATCH 38/72] python linter: clean extras/snap_scheduler/snap_scheduler.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/snap_scheduler/snap_scheduler.py | 118 +++++++++++++++++--------------- # 1 file changed, 64 insertions(+), 54 deletions(-) # extras/snap_scheduler/snap_scheduler.py | 118 +++++++++++++++++--------------- # 1 file changed, 64 insertions(+), 54 deletions(-) --- extras/snap_scheduler/snap_scheduler.py | 118 +++++++++++++----------- 1 file changed, 64 insertions(+), 54 deletions(-) diff --git a/extras/snap_scheduler/snap_scheduler.py b/extras/snap_scheduler/snap_scheduler.py index e8fcc449a9b..83ba39a13f8 100755 --- a/extras/snap_scheduler/snap_scheduler.py +++ b/extras/snap_scheduler/snap_scheduler.py @@ -9,17 +9,19 @@ # cases as published by the Free Software Foundation. 
from __future__ import print_function -import subprocess -import os -import os.path -import logging import argparse import fcntl +import logging import logging.handlers -import sys +import os +import os.path import shutil +import subprocess +import sys from errno import EEXIST + from conf import GLUSTERFS_LIBEXECDIR + sys.path.insert(1, GLUSTERFS_LIBEXECDIR) EVENTS_ENABLED = True @@ -67,7 +69,7 @@ SCRIPT_NAME = "snap_scheduler" scheduler_enabled = False log = logging.getLogger(SCRIPT_NAME) -SHARED_STORAGE_DIR="/run/gluster/shared_storage" +SHARED_STORAGE_DIR = "/run/gluster/shared_storage" GCRON_DISABLED = SHARED_STORAGE_DIR+"/snaps/gcron_disabled" GCRON_ENABLED = SHARED_STORAGE_DIR+"/snaps/gcron_enabled" GCRON_TASKS = SHARED_STORAGE_DIR+"/snaps/glusterfs_snap_cron_tasks" @@ -98,7 +100,8 @@ INVALID_ARG = 16 VOLUME_DOES_NOT_EXIST = 17 -def print_error (error_num): + +def print_error(error_num): if error_num == INTERNAL_ERROR: return "Internal Error" elif error_num == SHARED_STORAGE_DIR_DOESNT_EXIST: @@ -134,6 +137,7 @@ def print_error (error_num): elif error_num == VOLUME_DOES_NOT_EXIST: return "The volume does not exist." 
+ def output(msg): print("%s: %s" % (SCRIPT_NAME, msg)) @@ -178,6 +182,7 @@ def scheduler_status(): return ret + def enable_scheduler(): ret = scheduler_status() if ret == 0: @@ -388,6 +393,7 @@ def write_tasks_to_file(): return ret + def update_current_scheduler(data): try: with open(TMP_FILE, "w", 0o644) as f: @@ -429,7 +435,7 @@ def isVolumePresent(volname): log.error("Command output:") log.error(err) else: - success = True; + success = True return success @@ -547,6 +553,7 @@ def edit_schedules(jobname, schedule, volname): return ret + def get_bool_val(): getsebool_cli = ["getsebool", "-a"] @@ -573,6 +580,7 @@ def get_bool_val(): return bool_val + def get_selinux_status(): getenforce_cli = ["getenforce"] log.debug("Running command '%s'", " ".join(getenforce_cli)) @@ -581,8 +589,7 @@ def get_selinux_status(): p1 = subprocess.Popen(getenforce_cli, stdout=subprocess.PIPE, stderr=subprocess.PIPE) except OSError as oserr: - log.error("Failed to run the command \"getenforce\". Error: %s" %\ - oserr) + log.error("Failed to run the command \"getenforce\". 
Error: %s" % oserr) return -1 output, err = p1.communicate() @@ -593,11 +600,12 @@ def get_selinux_status(): log.error(err) return -1 else: - selinux_status=output.rstrip() + selinux_status = output.rstrip() log.debug("selinux status: %s", selinux_status) return selinux_status + def set_cronjob_user_share(): selinux_status = get_selinux_status() if (selinux_status == -1): @@ -637,6 +645,7 @@ def set_cronjob_user_share(): # we return a failure here return -1 + def initialise_scheduler(): ret = set_cronjob_user_share() if ret: @@ -675,7 +684,7 @@ def initialise_scheduler(): log.info("Successfully initialised snapshot scheduler for this node") output("Successfully initialised snapshot scheduler for this node") - gf_event (EVENT_SNAPSHOT_SCHEDULER_INITIALISED, status="Success") + gf_event(EVENT_SNAPSHOT_SCHEDULER_INITIALISED, status="Success") ret = 0 return ret @@ -687,25 +696,25 @@ def syntax_checker(args): output("Invalid Jobname. Jobname should not be empty and should not contain \" \" character.") ret = INVALID_JOBNAME return ret - args.jobname=args.jobname.strip() + args.jobname = args.jobname.strip() if hasattr(args, 'volname'): if (len(args.volname.split()) != 1): output("Invalid Volname. Volname should not be empty and should not contain \" \" character.") ret = INVALID_VOLNAME return ret - args.volname=args.volname.strip() + args.volname = args.volname.strip() if hasattr(args, 'schedule'): if (len(args.schedule.split()) != 5): output("Invalid Schedule. 
Please refer to the following for adding a valid cron schedule") - print ("* * * * *") - print ("| | | | |") - print ("| | | | +---- Day of the Week (range: 1-7, 1 standing for Monday)") - print ("| | | +------ Month of the Year (range: 1-12)") - print ("| | +-------- Day of the Month (range: 1-31)") - print ("| +---------- Hour (range: 0-23)") - print ("+------------ Minute (range: 0-59)") + print("* * * * *") + print("| | | | |") + print("| | | | +---- Day of the Week (range: 1-7, 1 standing for Monday)") + print("| | | +------ Month of the Year (range: 1-12)") + print("| | +-------- Day of the Month (range: 1-31)") + print("| +---------- Hour (range: 0-23)") + print("+------------ Minute (range: 0-59)") ret = INVALID_SCHEDULE return ret @@ -722,8 +731,8 @@ def perform_operation(args): ret = initialise_scheduler() if ret != 0: output("Failed to initialise snapshot scheduling") - gf_event (EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_INIT_FAILED, + error=print_error(ret)) return ret # Disable snapshot scheduler @@ -731,17 +740,17 @@ def perform_operation(args): ret = disable_scheduler() if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED, - status="Successfully Disabled") + gf_event(EVENT_SNAPSHOT_SCHEDULER_DISABLED, + status="Successfully Disabled") else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED, + error=print_error(ret)) return ret # Check if the symlink to GCRON_TASKS is properly set in the shared storage if (not os.path.lexists(GCRON_UPDATE_TASK) or - not os.path.lexists(GCRON_CROND_TASK) or - os.readlink(GCRON_CROND_TASK) != GCRON_TASKS): + not os.path.lexists(GCRON_CROND_TASK) or + os.readlink(GCRON_CROND_TASK) != GCRON_TASKS): print_str = ("Please run 'snap_scheduler.py' init to initialise " "the snap scheduler for the local node.") log.error(print_str) @@ 
-766,11 +775,11 @@ def perform_operation(args): ret = enable_scheduler() if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLED, - status="Successfully Enabled") + gf_event(EVENT_SNAPSHOT_SCHEDULER_ENABLED, + status="Successfully Enabled") else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_ENABLE_FAILED, + error=print_error(ret)) return ret # Disable snapshot scheduler @@ -778,11 +787,11 @@ def perform_operation(args): ret = disable_scheduler() if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLED, - status="Successfully Disabled") + gf_event(EVENT_SNAPSHOT_SCHEDULER_DISABLED, + status="Successfully Disabled") else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_DISABLE_FAILED, + error=print_error(ret)) return ret # List snapshot schedules @@ -798,12 +807,12 @@ def perform_operation(args): ret = add_schedules(args.jobname, args.schedule, args.volname) if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED, - status="Successfully added job "+args.jobname) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADDED, + status="Successfully added job "+args.jobname) else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED, - status="Failed to add job "+args.jobname, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_ADD_FAILED, + status="Failed to add job "+args.jobname, + error=print_error(ret)) return ret # Delete snapshot schedules @@ -814,12 +823,12 @@ def perform_operation(args): ret = delete_schedules(args.jobname) if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED, - status="Successfully deleted job "+args.jobname) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED, + status="Successfully 
deleted job "+args.jobname) else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED, - status="Failed to delete job "+args.jobname, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED, + status="Failed to delete job "+args.jobname, + error=print_error(ret)) return ret # Edit snapshot schedules @@ -830,17 +839,18 @@ def perform_operation(args): ret = edit_schedules(args.jobname, args.schedule, args.volname) if ret == 0: subprocess.Popen(["touch", "-h", GCRON_TASKS]) - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED, - status="Successfully edited job "+args.jobname) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDITED, + status="Successfully edited job "+args.jobname) else: - gf_event (EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED, - status="Failed to edit job "+args.jobname, - error=print_error(ret)) + gf_event(EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_EDIT_FAILED, + status="Failed to edit job "+args.jobname, + error=print_error(ret)) return ret ret = INVALID_ARG return ret + def gf_event(event_type, **kwargs): if EVENTS_ENABLED: from events.gf_event import gf_event as gfevent @@ -853,8 +863,8 @@ def main(argv): parser = argparse.ArgumentParser() subparsers = parser.add_subparsers(dest="action", metavar=('{init, status, enable,' - ' disable, list, add,' - ' delete, edit}')) + ' disable, list, add,' + ' delete, edit}')) subparsers.add_parser('init', help="Initialise the node for snapshot scheduling") From 1975fb9ed97e81b0da81d79a10cbb3f1c385949e Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:48:41 -0300 Subject: [PATCH 39/72] python linter: clean extras/snap_scheduler/gcron.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/snap_scheduler/gcron.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # extras/snap_scheduler/gcron.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- extras/snap_scheduler/gcron.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/snap_scheduler/gcron.py b/extras/snap_scheduler/gcron.py index 0e4df77d481..64e1620979d 100755 --- a/extras/snap_scheduler/gcron.py +++ b/extras/snap_scheduler/gcron.py @@ -147,7 +147,7 @@ def main(): # the GCRON_RELOAD_FLAG if os.path.exists(GCRON_RELOAD_FLAG): try: - os.remove(GCRON_RELOAD_FLAG); + os.remove(GCRON_RELOAD_FLAG) process = subprocess.Popen(["touch", "-h", GCRON_CROND_TASK], stdout=subprocess.PIPE, stderr=subprocess.PIPE) From fd899d4a373b6c41a0a46714436545e97e92b868 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:48:59 -0300 Subject: [PATCH 40/72] python linter: clean extras/rebalance/directory-rebalance.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/rebalance/directory-rebalance.py | 76 ++++++++++++++++++--------------- # 1 file changed, 42 insertions(+), 34 deletions(-) # extras/rebalance/directory-rebalance.py | 76 ++++++++++++++++++--------------- # 1 file changed, 42 insertions(+), 34 deletions(-) --- extras/rebalance/directory-rebalance.py | 76 ++++++++++++++----------- 1 file changed, 42 insertions(+), 34 deletions(-) diff --git a/extras/rebalance/directory-rebalance.py b/extras/rebalance/directory-rebalance.py index c679e6d09a1..c4cf7eab3e9 100755 --- a/extras/rebalance/directory-rebalance.py +++ b/extras/rebalance/directory-rebalance.py @@ -11,64 +11,70 @@ # import argparse -import os -import errno -import time -import sys import datetime +import errno import hashlib import logging +import os +import sys +import time + def size_fmt(num): - for unit in ['B','KiB','MiB','GiB','TiB','PiB','EiB','ZiB']: + for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']: if abs(num) < 1024.0: return f"{num:7.2f} {unit}" num /= 1024.0 return "f{num:.2f} YiB" + def time_fmt(fr_sec): return str(datetime.timedelta(seconds=int(fr_sec))) + def crawl_progress(count, size): sys.stdout.write(f'Building index of {count} files with cumulative size {size} to attempt rebalance.\r') sys.stdout.flush() -#https://gist.github.com/vladignatyev/06860ec2040cb497f0f3 + +# https://gist.github.com/vladignatyev/06860ec2040cb497f0f3 def progress(count, total, status=''): bar_len = 60 filled_len = int(round(bar_len * count / float(total))) percents = round(100.0 * count / float(total), 1) - bar = '=' * filled_len + '-' * (bar_len - filled_len) + bar_str = '=' * filled_len + '-' * (bar_len - filled_len) - sys.stdout.write(f'[{bar}] {percents}% ...{status}\r') + sys.stdout.write(f'[{bar_str}] {percents}% ...{status}\r') sys.stdout.flush() + def progress_done(): print() + class Rebalancer: def __init__(self, path): - self.path = path # Path on which migration script is 
executed - self.migration_duration = 0 #Time spent in migrating data - self.skipped_migration_duration = 0 #Time spent in migrating data - self.migrated_files = 0 #Number of files migrated successfully - self.total_files = 0 #Total number of files scanned - self.total_size = 0 #Cumulative size of files scanned so far - self.expected_total_size = 0 #This is calculated at the time of populating index - self.expected_total_files = 0 #This is calculated at the time of populating index - self.migrated_size = 0 #Cumulative size of files migrated + self.path = path # Path on which migration script is executed + self.migration_duration = 0 # Time spent in migrating data + self.skipped_migration_duration = 0 # Time spent in migrating data + self.migrated_files = 0 # Number of files migrated successfully + self.total_files = 0 # Total number of files scanned + self.total_size = 0 # Cumulative size of files scanned so far + self.expected_total_size = 0 # This is calculated at the time of populating index + self.expected_total_files = 0 # This is calculated at the time of populating index + self.migrated_size = 0 # Cumulative size of files migrated self.index = self.get_file_name('index') self.init_logging() - self.rebalance_start = 0 #Start time to be updated in run + self.rebalance_start = 0 # Start time to be updated in run def __enter__(self): return self - #Generate a unique name for the given path. format of the path will be - #rebalance--.suffix - #If the length of this name is > 255 then hiphenated-path is truncated to - #make space + # Generate a unique name for the given path. 
format of the path will be + # rebalance--.suffix + # If the length of this name is > 255 then hiphenated-path is truncated to + # make space def get_file_name(self, suffix): name_suffix = hashlib.md5(self.path.encode('utf-8')).hexdigest()[:8]+'.'+suffix name_suffix = '-' + name_suffix @@ -79,26 +85,26 @@ def get_file_name(self, suffix): name += name_suffix return name - #Log format is as follows - #2020-10-21 18:24:27.838 INFO /mnt/glusterfs/0/aaaaaaaaaa/1 - 1.0 KiB [1024] - 74.6 KiB/s + # Log format is as follows + # 2020-10-21 18:24:27.838 INFO /mnt/glusterfs/0/aaaaaaaaaa/1 - 1.0 KiB [1024] - 74.6 KiB/s def init_logging(self): logging.basicConfig(filename=self.get_file_name('log'), level=logging.DEBUG, format='%(asctime)s.%(msecs)03d %(levelname)s %(message)s', datefmt='%Y-%m-%d %H:%M:%S') - #Executes the setxattr syscall to trigger migration + # Executes the setxattr syscall to trigger migration def migrate_data(self, f): size_now = 0 try: size_now = os.stat(f).st_size os.setxattr(f, "trusted.distribute.migrate-data", b"1", follow_symlinks=False) - return True, size_now, None #Indicate that migration happened + return True, size_now, None # Indicate that migration happened except OSError as e: return False, size_now, e - #Updates the total,migrated,skipped files/size and durations of the migrations + # Updates the total,migrated,skipped files/size and durations of the migrations def migrate_with_stats(self, f, size): migration_start = time.perf_counter() result, size_now, err = self.migrate_data(f) @@ -110,8 +116,8 @@ def migrate_with_stats(self, f, size): if err.errno == errno.EEXIST: logging.info(f"{f} - Not needed") elif err.errno == errno.ENOENT: - #Account for file deletion - #File could be deleted just after stat, so update size_diff again + # Account for file deletion + # File could be deleted just after stat, so update size_diff again size_diff = -size self.expected_total_files -= 1 logging.info(f"{f} - file not present anymore") @@ -119,7 +125,7 @@ def 
migrate_with_stats(self, f, size): logging.critical(f"{f} - {err} - exiting.") raise err - #Account for size changes between indexing and rebalancing + # Account for size changes between indexing and rebalancing self.expected_total_size += size_diff size = size_now if result: @@ -160,7 +166,7 @@ def run(self): eta = ((self.expected_total_size - self.total_size)/speed)*migration_fraction progress(self.total_size, self.expected_total_size, f"ETA: {time_fmt(eta)}") - #For each file in the directory recursively, writes - to index file + # For each file in the directory recursively, writes - to index file def generate_rebalance_file_index(self): with open(self.index, 'w') as file_index: total_size = 0 @@ -178,7 +184,7 @@ def generate_rebalance_file_index(self): print(f"OS error: {err}") progress_done() - #Stops the progress printing and prints stats collected so far + # Stops the progress printing and prints stats collected so far def __exit__(self, exc_type, exc_value, traceback): progress_done() if self.rebalance_start != 0: @@ -193,8 +199,9 @@ def __exit__(self, exc_type, exc_value, traceback): print(f"Time spent in migration: {time_fmt(self.migration_duration)} [{self.migration_duration/self.duration:.2%}]") print(f"Time spent in skipping: {time_fmt(self.skipped_migration_duration)} [{self.skipped_migration_duration/self.duration:.2%}]") -#/proc/mounts has active mount information. It checks that the given path is -#mounted on glusterfs + +# /proc/mounts has active mount information. 
It checks that the given path is +# mounted on glusterfs def check_glusterfs_supported_path(p): real_path = os.path.realpath(p) if not os.path.isdir(real_path): @@ -220,6 +227,7 @@ def check_glusterfs_supported_path(p): raise argparse.ArgumentTypeError(f"{real_path} is not a valid glusterfs path") + if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument("path", type=check_glusterfs_supported_path) From 888e5634f042fe1011d126391287cf45423124db Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:04:55 -0300 Subject: [PATCH 41/72] python linter: clean extras/quota/xattr_analysis.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/xattr_analysis.py | 19 +++++++++++-------- # 1 file changed, 11 insertions(+), 8 deletions(-) # extras/quota/xattr_analysis.py | 19 +++++++++++-------- # 1 file changed, 11 insertions(+), 8 deletions(-) --- extras/quota/xattr_analysis.py | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py index 7bd7d96374c..4fa90a349a7 100755 --- a/extras/quota/xattr_analysis.py +++ b/extras/quota/xattr_analysis.py @@ -8,20 +8,23 @@ # find | xargs getfattr -d -m. -e hex > log_gluster_xattr from __future__ import print_function + import re import subprocess import sys + from hurry.filesize import size if len(sys.argv) < 2: sys.exit('Usage: %s log_gluster_xattr \n' - 'to generate log_gluster_xattr use: \n' - 'find | xargs getfattr -d -m. -e hex > log_gluster_xattr' - % sys.argv[0]) -LOG_FILE=sys.argv[1] + 'to generate log_gluster_xattr use: \n' + 'find | xargs getfattr -d -m. 
-e hex > log_gluster_xattr' + % sys.argv[0]) +LOG_FILE = sys.argv[1] + def get_quota_xattr_brick(): - out = subprocess.check_output (["/usr/bin/cat", LOG_FILE]) + out = subprocess.check_output(["/usr/bin/cat", LOG_FILE]) pairs = out.splitlines() xdict = {} @@ -30,7 +33,7 @@ def get_quota_xattr_brick(): k = xattr.split("=")[0] if re.search("# file:", k): print(xdict) - filename=k + filename = k print("=====" + filename + "=======") xdict = {} elif k is "": @@ -40,7 +43,8 @@ def get_quota_xattr_brick(): v = xattr.split("=")[1] if re.search("contri", k): if len(v) == 34: - # for files size is obtained in iatt, file count should be 1, dir count=0 + # for files size is obtained in iatt, file count + # should be 1, dir count=0 xdict['contri_file_count'] = int(v[18:34], 16) xdict['contri_dir_count'] = 0 else: @@ -70,4 +74,3 @@ def get_quota_xattr_brick(): if __name__ == '__main__': get_quota_xattr_brick() - From d23214be426c5b7cc2e51c6ffba2424bb494feba Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:20:34 -0300 Subject: [PATCH 42/72] python linter: clean extras/quota/quota_fsck.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/quota_fsck.py | 130 +++++++++++++++++++++++++-------------------- # 1 file changed, 72 insertions(+), 58 deletions(-) # extras/quota/quota_fsck.py | 130 +++++++++++++++++++++++++-------------------- # 1 file changed, 72 insertions(+), 58 deletions(-) --- extras/quota/quota_fsck.py | 130 ++++++++++++++++++++----------------- 1 file changed, 72 insertions(+), 58 deletions(-) diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py index e62f7fc52a3..624ade99797 100755 --- a/extras/quota/quota_fsck.py +++ b/extras/quota/quota_fsck.py @@ -3,7 +3,7 @@ # anomalies in quota accounting. Run this script with -h option # for further details. -''' +""" Copyright (c) 2018 Red Hat, Inc. This file is part of GlusterFS. @@ -11,12 +11,15 @@ General Public License, version 3 or any later version (LGPLv3 or later), or the GNU General Public License, version 2 (GPLv2), in all cases as published by the Free Software Foundation. -''' +""" from __future__ import print_function -import os, sys, re -from stat import * -import subprocess + import argparse +import os +import re +import subprocess +import sys +from stat import * import xattr aggr_size = {} @@ -27,7 +30,7 @@ file_count = 0 dir_count = 0 -#CONSTANTS +# CONSTANTS KB = 1024 MB = 1048576 GB = 1048576 * 1024 @@ -37,20 +40,21 @@ QUOTA_META_ABSENT = 1 QUOTA_SIZE_MISMATCH = 2 -IS_DIRTY ='0x3100' -IS_CLEAN ='0x3000' +IS_DIRTY = '0x3100' +IS_CLEAN = '0x3000' -epilog_msg=''' +epilog_msg = """ The script attempts to find any gluster accounting issues in the filesystem at the given subtree. The script crawls the given subdirectory tree doing a stat for all files and compares the size reported by gluster quota with the size reported by stat calls. Any mismatch is reported. In addition integrity of marker xattrs are verified. 
- ''' + """ + -def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None): +def print_msg(log_type, path, xattr_dict={}, stbuf="", dir_size=None): if log_type == QUOTA_VERBOSE: print('%-24s %-60s\nxattr_values: %s\n%s\n' % ("Verbose", path, xattr_dict, stbuf)) elif log_type == QUOTA_META_ABSENT: @@ -59,26 +63,30 @@ def print_msg(log_type, path, xattr_dict = {}, stbuf = "", dir_size = None): print("mismatch") if dir_size is not None: print('%24s %60s %12s %12s' % ("Size Mismatch", path, - xattr_dict, dir_size)) + xattr_dict, dir_size)) else: print('%-24s %-60s %-12s %-12s' % ("Size Mismatch", path, xattr_dict, - stbuf.st_size)) + stbuf.st_size)) + def size_differs_lot(s1, s2): - ''' + """Check size difference. + There could be minor accounting differences between the stat based accounting and gluster accounting. To avoid these from throwing lot of false positives in our logs. using a threshold of 1M for now. - TODO: For a deeply nested directory, at higher levels in hierarchy - differences may not be significant, hence this check needs to be improved. - ''' + """ + # TODO: For a deeply nested directory, at higher levels in hierarchy + # differences may not be significant, hence this check needs to be + # improved. if abs(s1-s2) > 0: return True else: return False + def fix_hardlink_accounting(curr_dict, accounted_dict, curr_size): - ''' + """ Hard links are messy.. we have to account them for their parent directory. But, stop accounting at the most common ancestor. Eg: @@ -104,7 +112,7 @@ def fix_hardlink_accounting(curr_dict, accounted_dict, curr_size): hardlinks that has already been accounted in current subtree. Also delete the duplicate link from curr_dict. 
- ''' + """ dual_accounted_links = set(curr_dict.keys()) & set(accounted_dict.keys()) for link in dual_accounted_links: @@ -121,21 +129,22 @@ def fix_xattr(file_name, mark_dirty): return if mark_dirty: print("MARKING DIRTY: " + file_name) - out = subprocess.check_output (["/usr/bin/setfattr", "-n", + out = subprocess.check_output(["/usr/bin/setfattr", "-n", "trusted.glusterfs.quota.dirty", "-v", IS_DIRTY, file_name]) rel_path = os.path.relpath(file_name, brick_path) - print("stat on " + mnt_path + "/" + rel_path) + print("stat on " + mnt_path + "/" + rel_path) stbuf = os.lstat(mnt_path + "/" + rel_path) obj_fix_count += 1 + def get_quota_xattr_brick(dpath): - out = subprocess.check_output (["/usr/bin/getfattr", "--no-dereference", - "-d", "-m.", "-e", "hex", dpath]) + out = subprocess.check_output(["/usr/bin/getfattr", "--no-dereference", + "-d", "-m.", "-e", "hex", dpath]) pairs = out.splitlines() - ''' + """ Sample output to be parsed: [root@dhcp35-100 mnt]# getfattr -d -m. -e hex /export/b1/B0/d14/d13/ # file: export/b1/B0/d14/d13/ @@ -146,12 +155,12 @@ def get_quota_xattr_brick(dpath): trusted.glusterfs.quota.6a7675a3-b85a-40c5-830b-de9229d702ce.contri.39=0x00000000000000000000000000000000000000000000000e trusted.glusterfs.quota.dirty=0x3000 trusted.glusterfs.quota.size.39=0x00000000000000000000000000000000000000000000000e - ''' + """ - ''' + """ xattr_dict dictionary holds quota related xattrs eg: - ''' + """ xattr_dict = {} xattr_dict['parents'] = {} @@ -174,8 +183,7 @@ def get_quota_xattr_brick(dpath): xattr_dict['version'] = xattr_version else: if xattr_version != xattr_dict['version']: - print("Multiple xattr version found") - + print("Multiple xattr version found") cur_parent = xattr_key.split(".")[3] if cur_parent not in xattr_dict['parents']: @@ -211,7 +219,8 @@ def get_quota_xattr_brick(dpath): return xattr_dict -def verify_file_xattr(path, stbuf = None): + +def verify_file_xattr(path, stbuf=None): global file_count file_count += 1 @@ -265,11 +274,13 
@@ def verify_dir_xattr(path, dir_size): def walktree(t_dir, hard_link_dict): - '''recursively descend the directory tree rooted at dir, - aggregating the size - t_dir : directory to walk over. - hard_link_dict : dict of inodes with multiple hard_links under t_dir - ''' + """Walk the tree. + + recursively descend the directory tree rooted at dir, aggregating the size + + t_dir : directory to walk over. + hard_link_dict : dict of inodes with multiple hard_links under t_dir + """ global aggr_size aggr_size[t_dir] = 0 @@ -296,16 +307,16 @@ def walktree(t_dir, hard_link_dict): file_size = stbuf.st_size if stbuf.st_nlink > 2: # send a single element dict to check if file is accounted. - file_size = fix_hardlink_accounting({stbuf.st_ino:stbuf.st_size}, + file_size = fix_hardlink_accounting({stbuf.st_ino: stbuf.st_size}, hard_link_dict, stbuf.st_size) if file_size == 0: print_msg("HARD_LINK (skipped)", pathname, "", - stbuf) + stbuf) else: print_msg("HARD_LINK (accounted)", pathname, "", - stbuf) + stbuf) hard_link_dict[stbuf.st_ino] = stbuf.st_size if t_dir in aggr_size: @@ -325,7 +336,7 @@ def walktree(t_dir, hard_link_dict): # du also accounts for t_directory sizes # aggr_size[t_dir] += 4096 - #cleanup + # cleanup ret = aggr_size[t_dir] del aggr_size[t_dir] return ret @@ -337,27 +348,30 @@ def walktree(t_dir, hard_link_dict): parser.add_argument('brick_path', nargs=1, help='The brick path (or any descendent sub-directory of brick path)', ) - parser.add_argument('--full-logs', dest='verbose', action='store_true', - help=''' - log all the xattr values and stat values reported - for analysis. [CAUTION: This can give lot of output - depending on FS depth. So one has to make sure enough - disk space exists if redirecting to file] - ''' + parser.add_argument('--full-logs', + dest='verbose', action='store_true', + help=""" + log all the xattr values and stat values reported + for analysis. [CAUTION: This can give lot of output + depending on FS depth. 
So one has to make sure enough + disk space exists if redirecting to file] + """ ) - parser.add_argument('--fix-issues', metavar='mount_path', dest='mnt', action='store', - help=''' - fix accounting issues where the xattr values disagree - with stat sizes reported by gluster. A mount is also - required for this option to be used. - [CAUTION: This will directly modify backend xattr] - ''' + parser.add_argument('--fix-issues', + metavar='mount_path', dest='mnt', action='store', + help=""" + fix accounting issues where the xattr values disagree + with stat sizes reported by gluster. A mount is also + required for this option to be used. + [CAUTION: This will directly modify backend xattr] + """ ) - parser.add_argument('--sub-dir', metavar='sub_dir', dest='sub_dir', action='store', - help=''' - limit the crawling and accounting verification/correction - to a specific subdirectory. - ''' + parser.add_argument('--sub-dir', + metavar='sub_dir', dest='sub_dir', action='store', + help=""" + limit the crawling and accounting + verification/correction to a specific subdirectory. + """ ) args = parser.parse_args() From 9ad6c2c992cc1a6de34643f2bcd5efc74ec5ccf4 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:49:41 -0300 Subject: [PATCH 43/72] python linter: clean extras/hook-scripts/S40ufo-stop.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/hook-scripts/S40ufo-stop.py | 3 +-- # 1 file changed, 1 insertion(+), 2 deletions(-) # extras/hook-scripts/S40ufo-stop.py | 3 +-- # 1 file changed, 1 insertion(+), 2 deletions(-) --- extras/hook-scripts/S40ufo-stop.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/extras/hook-scripts/S40ufo-stop.py b/extras/hook-scripts/S40ufo-stop.py index 2c79eb1d54a..a2159878c9e 100755 --- a/extras/hook-scripts/S40ufo-stop.py +++ b/extras/hook-scripts/S40ufo-stop.py @@ -9,14 +9,13 @@ from gluster.swift.common.Glusterfs import get_mnt_point, unmount except ImportError: import sys - sys.exit("Openstack Swift does not appear to be installed properly"); + sys.exit("Openstack Swift does not appear to be installed properly") op = OptionParser(usage="%prog [options...]") op.add_option('--volname', dest='vol', type=str) op.add_option('--last', dest='last', type=str) (opts, args) = op.parse_args() - mnt_point = get_mnt_point(opts.vol) if mnt_point: unmount(mnt_point) From 5d93486c6e2af9dcf363b073727ff7ab23de6801 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:04:50 -0300 Subject: [PATCH 44/72] python linter: clean extras/geo-rep/schedule_georep.py.in This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/geo-rep/schedule_georep.py.in | 8 ++++---- # 1 file changed, 4 insertions(+), 4 deletions(-) # extras/geo-rep/schedule_georep.py.in | 8 ++++---- # 1 file changed, 4 insertions(+), 4 deletions(-) --- extras/geo-rep/schedule_georep.py.in | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/extras/geo-rep/schedule_georep.py.in b/extras/geo-rep/schedule_georep.py.in index 7d5a1017d49..cf51aea5ccb 100644 --- a/extras/geo-rep/schedule_georep.py.in +++ b/extras/geo-rep/schedule_georep.py.in @@ -45,10 +45,9 @@ SESSION_MOUNT_LOG_FILE = ("/var/log/glusterfs/geo-replication" USE_CLI_COLOR = True mnt_list = [] + class GlusterBadXmlFormat(Exception): - """ - Exception class for XML Parse Errors - """ + """Exception class for XML Parse Errors.""" pass @@ -344,7 +343,7 @@ def get_summary(primaryvol, secondary_url): summary["ok"] = True if session_name != "": - out.append([session_name, summary, faulty_rows, down_rows]) + out.append([session_name, summary, faulty_rows, down_rows]) return out @@ -454,6 +453,7 @@ def main(args): failure_msg="Unable to Remove temp directory " "{0}".format(mnt), exitcode=0) + if __name__ == "__main__": parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, description=__doc__) From 5eb294fbc9dc9424ef27f16d5794607697969dfc Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:04:48 -0300 Subject: [PATCH 45/72] python linter: clean extras/ganesha/scripts/generate-epoch.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/ganesha/scripts/generate-epoch.py | 23 +++++++++++++---------- # 1 file changed, 13 insertions(+), 10 deletions(-) # extras/ganesha/scripts/generate-epoch.py | 23 +++++++++++++---------- # 1 file changed, 13 insertions(+), 10 deletions(-) --- extras/ganesha/scripts/generate-epoch.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/extras/ganesha/scripts/generate-epoch.py b/extras/ganesha/scripts/generate-epoch.py index 77af014bab9..444551dfa61 100755 --- a/extras/ganesha/scripts/generate-epoch.py +++ b/extras/ganesha/scripts/generate-epoch.py @@ -18,26 +18,29 @@ # first 32-bit contains the now() time # rest 32-bit value contains the local glusterd node uuid -import time import binascii +import time + # Calculate the now() time into a 64-bit integer value def epoch_now(): - epoch_time = int(time.mktime(time.localtime())) << 32 - return epoch_time + epoch_time = int(time.mktime(time.localtime())) << 32 + return epoch_time + # Read glusterd UUID and extract first 32-bit of it def epoch_uuid(): - file_name = '/var/lib/glusterd/glusterd.info' + file_name = '/var/lib/glusterd/glusterd.info' + + for line in open(file_name): + if "UUID" in line: + glusterd_uuid = line.split('=')[1].strip() - for line in open(file_name): - if "UUID" in line: - glusterd_uuid = line.split('=')[1].strip() + uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-", "")) - uuid_bin = binascii.unhexlify(glusterd_uuid.replace("-","")) + epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000 + return epoch_uuid - epoch_uuid = int(binascii.hexlify(uuid_bin), 32) & 0xFFFF0000 - return epoch_uuid # Construct epoch as follows - # first 32-bit contains the now() time From e6310de39f20b068569f8724211d0ea5817e7e7d Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:43:25 -0300 Subject: [PATCH 46/72] python linter: clean extras/volfilter.py This series aim to 
reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/volfilter.py | 294 +++++++++++++++++++++++++++------------------------- # 1 file changed, 154 insertions(+), 140 deletions(-) # extras/volfilter.py | 294 +++++++++++++++++++++++++++------------------------- # 1 file changed, 154 insertions(+), 140 deletions(-) --- extras/volfilter.py | 294 +++++++++++++++++++++++--------------------- 1 file changed, 154 insertions(+), 140 deletions(-) diff --git a/extras/volfilter.py b/extras/volfilter.py index 5558a1beff4..ab838880501 100644 --- a/extras/volfilter.py +++ b/extras/volfilter.py @@ -1,3 +1,4 @@ +"""Extras volfilter.""" # Copyright (c) 2010-2011 Red Hat, Inc. # # This file is part of HekaFS. @@ -14,155 +15,168 @@ # with HekaFS. If not, see . 
from __future__ import print_function + import copy import string import sys import types good_xlators = [ - "cluster/afr", - "cluster/dht", - "cluster/distribute", - "cluster/replicate", - "cluster/stripe", - "debug/io-stats", - "features/access-control", - "features/locks", - "features/marker", - "features/uidmap", - "performance/io-threads", - "protocol/client", - "protocol/server", - "storage/posix", + "cluster/afr", + "cluster/dht", + "cluster/distribute", + "cluster/replicate", + "cluster/stripe", + "debug/io-stats", + "features/access-control", + "features/locks", + "features/marker", + "features/uidmap", + "performance/io-threads", + "protocol/client", + "protocol/server", + "storage/posix", ] -def copy_stack (old_xl, suffix, recursive=False): - if recursive: - new_name = old_xl.name + "-" + suffix - else: - new_name = suffix - new_xl = Translator(new_name) - new_xl.type = old_xl.type - # The results with normal assignment here are . . . amusing. - new_xl.opts = copy.deepcopy(old_xl.opts) - for sv in old_xl.subvols: - new_xl.subvols.append(copy_stack(sv, suffix, True)) - # Patch up the path at the bottom. - if new_xl.type == "storage/posix": - new_xl.opts["directory"] += ("/" + suffix) - return new_xl - -def cleanup (parent, graph): - if parent.type in good_xlators: - # Temporary fix so that HekaFS volumes can use the - # SSL-enabled multi-threaded socket transport. 
- if parent.type == "protocol/server": - parent.type = "protocol/server2" - parent.opts["transport-type"] = "ssl" - elif parent.type == "protocol/client": - parent.type = "protocol/client2" - parent.opts["transport-type"] = "ssl" - sv = [] - for child in parent.subvols: - sv.append(cleanup(child, graph)) - parent.subvols = sv - else: - parent = cleanup(parent.subvols[0], graph) - return parent + +def copy_stack(old_xl, suffix, recursive=False): + """Copy translator stack to new stack.""" + if recursive: + new_name = old_xl.name + "-" + suffix + else: + new_name = suffix + new_xl = Translator(new_name) + new_xl.type = old_xl.type + # The results with normal assignment here are . . . amusing. + new_xl.opts = copy.deepcopy(old_xl.opts) + for sv in old_xl.subvols: + new_xl.subvols.append(copy_stack(sv, suffix, True)) + # Patch up the path at the bottom. + if new_xl.type == "storage/posix": + new_xl.opts["directory"] += ("/" + suffix) + return new_xl + + +def cleanup(parent, graph): + if parent.type in good_xlators: + # Temporary fix so that HekaFS volumes can use the + # SSL-enabled multi-threaded socket transport. + if parent.type == "protocol/server": + parent.type = "protocol/server2" + parent.opts["transport-type"] = "ssl" + elif parent.type == "protocol/client": + parent.type = "protocol/client2" + parent.opts["transport-type"] = "ssl" + sv = [] + for child in parent.subvols: + sv.append(cleanup(child, graph)) + parent.subvols = sv + else: + parent = cleanup(parent.subvols[0], graph) + return parent + class Translator: - def __init__ (self, name): - self.name = name - self.type = "" - self.opts = {} - self.subvols = [] - self.dumped = False - def __repr__ (self): - return "" % self.name - -def load (path): - # If it's a string, open it; otherwise, assume it's already a - # file-like object (most notably from urllib*). 
- if type(path) in (str,): - fp = file(path, "r") - else: - fp = path - all_xlators = {} - xlator = None - last_xlator = None - while True: - text = fp.readline() - if text == "": - break - text = text.split() - if not len(text): - continue - if text[0] == "volume": - if xlator: - raise RuntimeError("nested volume definition") - xlator = Translator(text[1]) - continue - if not xlator: - raise RuntimeError("text outside volume definition") - if text[0] == "type": - xlator.type = text[1] - continue - if text[0] == "option": - xlator.opts[text[1]] = ''.join(text[2:]) - continue - if text[0] == "subvolumes": - for sv in text[1:]: - xlator.subvols.append(all_xlators[sv]) - continue - if text[0] == "end-volume": - all_xlators[xlator.name] = xlator - last_xlator = xlator - xlator = None - continue - raise RuntimeError("unrecognized keyword %s" % text[0]) - if xlator: - raise RuntimeError("unclosed volume definition") - return all_xlators, last_xlator - -def generate (graph, last, stream=sys.stdout): - for sv in last.subvols: - if not sv.dumped: - generate(graph, sv, stream) - print("", file=stream) - sv.dumped = True - print("volume %s" % last.name, file=stream) - print(" type %s" % last.type, file=stream) - for k, v in last.opts.items(): - print(" option %s %s" % (k, v), file=stream) - if last.subvols: - print(" subvolumes %s" % ''.join( - [ sv.name for sv in last.subvols ]), file=stream) - print("end-volume", file=stream) - -def push_filter (graph, old_xl, filt_type, opts={}): - suffix = "-" + old_xl.type.split("/")[1] - if len(old_xl.name) > len(suffix): - if old_xl.name[-len(suffix):] == suffix: - old_xl.name = old_xl.name[:-len(suffix)] - new_xl = Translator(old_xl.name+suffix) - new_xl.type = old_xl.type - new_xl.opts = old_xl.opts - new_xl.subvols = old_xl.subvols - graph[new_xl.name] = new_xl - old_xl.name += ("-" + filt_type.split("/")[1]) - old_xl.type = filt_type - old_xl.opts = opts - old_xl.subvols = [new_xl] - graph[old_xl.name] = old_xl - -def delete 
(graph, victim): - if len(victim.subvols) != 1: - raise RuntimeError("attempt to delete non-unary translator") - for xl in graph.itervalues(): - while xl.subvols.count(victim): - i = xl.subvols.index(victim) - xl.subvols[i] = victim.subvols[0] + def __init__(self, name): + """Build a translator object.""" + self.name = name + self.type = "" + self.opts = {} + self.subvols = [] + self.dumped = False + + def __repr__(self): + """Human-readable representation.""" + return "" % self.name + + +def load(path): + # If it's a string, open it; otherwise, assume it's already a + # file-like object (most notably from urllib*). + if type(path) in (str,): + fp = file(path, "r") + else: + fp = path + all_xlators = {} + xlator = None + last_xlator = None + while True: + text = fp.readline() + if text == "": + break + text = text.split() + if not len(text): + continue + if text[0] == "volume": + if xlator: + raise RuntimeError("nested volume definition") + xlator = Translator(text[1]) + continue + if not xlator: + raise RuntimeError("text outside volume definition") + if text[0] == "type": + xlator.type = text[1] + continue + if text[0] == "option": + xlator.opts[text[1]] = ''.join(text[2:]) + continue + if text[0] == "subvolumes": + for sv in text[1:]: + xlator.subvols.append(all_xlators[sv]) + continue + if text[0] == "end-volume": + all_xlators[xlator.name] = xlator + last_xlator = xlator + xlator = None + continue + raise RuntimeError("unrecognized keyword %s" % text[0]) + if xlator: + raise RuntimeError("unclosed volume definition") + return all_xlators, last_xlator + + +def generate(graph, last, stream=sys.stdout): + for sv in last.subvols: + if not sv.dumped: + generate(graph, sv, stream) + print("", file=stream) + sv.dumped = True + print("volume %s" % last.name, file=stream) + print(" type %s" % last.type, file=stream) + for k, v in last.opts.items(): + print(" option %s %s" % (k, v), file=stream) + if last.subvols: + print(" subvolumes %s" % ''.join( + [sv.name for sv in 
last.subvols]), file=stream) + print("end-volume", file=stream) + + +def push_filter(graph, old_xl, filt_type, opts={}): + suffix = "-" + old_xl.type.split("/")[1] + if len(old_xl.name) > len(suffix): + if old_xl.name[-len(suffix):] == suffix: + old_xl.name = old_xl.name[:-len(suffix)] + new_xl = Translator(old_xl.name+suffix) + new_xl.type = old_xl.type + new_xl.opts = old_xl.opts + new_xl.subvols = old_xl.subvols + graph[new_xl.name] = new_xl + old_xl.name += ("-" + filt_type.split("/")[1]) + old_xl.type = filt_type + old_xl.opts = opts + old_xl.subvols = [new_xl] + graph[old_xl.name] = old_xl + + +def delete(graph, victim): + if len(victim.subvols) != 1: + raise RuntimeError("attempt to delete non-unary translator") + for xl in graph.itervalues(): + while xl.subvols.count(victim): + i = xl.subvols.index(victim) + xl.subvols[i] = victim.subvols[0] + if __name__ == "__main__": - graph, last = load(sys.argv[1]) - generate(graph, last) + graph, last = load(sys.argv[1]) + generate(graph, last) From fed5f3e109ca0f56331e180dac10b54f0dd735a8 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:44:30 -0300 Subject: [PATCH 47/72] python linter: clean extras/rebalance.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/rebalance.py | 537 ++++++++++++++++++++++++++-------------------------- # 1 file changed, 273 insertions(+), 264 deletions(-) # extras/rebalance.py | 537 ++++++++++++++++++++++++++-------------------------- # 1 file changed, 273 insertions(+), 264 deletions(-) --- extras/rebalance.py | 537 ++++++++++++++++++++++---------------------- 1 file changed, 273 insertions(+), 264 deletions(-) diff --git a/extras/rebalance.py b/extras/rebalance.py index 37c68ebbb42..daf5aff3d30 100755 --- a/extras/rebalance.py +++ b/extras/rebalance.py @@ -12,63 +12,69 @@ import subprocess import sys import tempfile + import volfilter import platform + # It's just more convenient to have named fields. class Brick: - def __init__ (self, path, name): - self.path = path - self.sv_name = name - self.size = 0 - self.curr_size = 0 - self.good_size = 0 - def set_size (self, size): - self.size = size - def set_range (self, rs, re): - self.r_start = rs - self.r_end = re - self.curr_size = self.r_end - self.r_start + 1 - def __repr__ (self): - value = self.path[:] - value += "(%d," % self.size - if self.curr_size: - value += "0x%x,0x%x)" % (self.r_start, self.r_end) - else: - value += "-)" - return value - -def get_bricks (host, vol): - t = pipes.Template() - t.prepend("gluster --remote-host=%s system getspec %s"%(host, vol), ".-") - return t.open(None, "r") - -def generate_stanza (vf, all_xlators, cur_subvol): - sv_list = [] - for sv in cur_subvol.subvols: - generate_stanza(vf, all_xlators, sv) - sv_list.append(sv.name) - vf.write("volume %s\n" % cur_subvol.name) - vf.write(" type %s\n" % cur_subvol.type) - for kvpair in cur_subvol.opts.items(): - vf.write(" option %s %s\n" % kvpair) - if sv_list: - vf.write(" subvolumes %s\n" % ''.join(sv_list)) - vf.write("end-volume\n\n") - - -def mount_brick (localpath, all_xlators, dht_subvol): - - # Generate a volfile. 
- vf_name = localpath + ".vol" - vf = open(vf_name, "w") - generate_stanza(vf, all_xlators, dht_subvol) - vf.flush() - vf.close() - - # Create a brick directory and mount the brick there. - os.mkdir(localpath) - subprocess.call(["glusterfs", "-f", vf_name, localpath]) + def __init__(self, path, name): + self.path = path + self.sv_name = name + self.size = 0 + self.curr_size = 0 + self.good_size = 0 + + def set_size(self, size): + self.size = size + + def set_range(self, rs, re): + self.r_start = rs + self.r_end = re + self.curr_size = self.r_end - self.r_start + 1 + + def __repr__(self): + value = self.path[:] + value += "(%d," % self.size + if self.curr_size: + value += "0x%x,0x%x)" % (self.r_start, self.r_end) + else: + value += "-)" + return value + + +def get_bricks(host, vol): + t = pipes.Template() + t.prepend("gluster --remote-host=%s system getspec %s" % (host, vol), ".-") + return t.open(None, "r") + + +def generate_stanza(vf, all_xlators, cur_subvol): + sv_list = [] + for sv in cur_subvol.subvols: + generate_stanza(vf, all_xlators, sv) + sv_list.append(sv.name) + vf.write("volume %s\n" % cur_subvol.name) + vf.write(" type %s\n" % cur_subvol.type) + for kvpair in cur_subvol.opts.items(): + vf.write(" option %s %s\n" % kvpair) + if sv_list: + vf.write(" subvolumes %s\n" % ''.join(sv_list)) + vf.write("end-volume\n\n") + + +def mount_brick(localpath, all_xlators, dht_subvol): + # Generate a volfile. + vf_name = localpath + ".vol" + vf = open(vf_name, "w") + generate_stanza(vf, all_xlators, dht_subvol) + vf.flush() + vf.close() + # Create a brick directory and mount the brick there. + os.mkdir(localpath) + subprocess.call(["glusterfs", "-f", vf_name, localpath]) + # We use the command-line tools because there's no getxattr support in the # Python standard library (which is ridiculous IMO). 
Adding the xattr package @@ -79,231 +85,234 @@ def mount_brick (localpath, all_xlators, dht_subvol): # We might have to revisit this if we get as far as actually issuing millions # of setxattr requests. Even then, it might be better to do that part with a C # program which has only a build-time dependency. -def get_range (brick): - t = pipes.Template() - cmd = "getfattr -e hex -n trusted.glusterfs.dht %s 2> /dev/null" - t.prepend(cmd%brick, ".-") - t.append("grep ^trusted.glusterfs.dht=", "--") - f = t.open(None, "r") - try: - value = f.readline().rstrip().split('=')[1][2:] - except: - print("could not get layout for %s (might be OK)" % brick) - return None - v_start = int("0x"+value[16:24], 16) - v_end = int("0x"+value[24:32], 16) - return (v_start, v_end) - -def calc_sizes (bricks, total): - leftover = 1 << 32 +def get_range(brick): + t = pipes.Template() + cmd = "getfattr -e hex -n trusted.glusterfs.dht %s 2> /dev/null" + t.prepend(cmd % brick, ".-") + t.append("grep ^trusted.glusterfs.dht=", "--") + f = t.open(None, "r") + try: + value = f.readline().rstrip().split('=')[1][2:] + except: + print("could not get layout for %s (might be OK)" % brick) + return None + v_start = int("0x"+value[16:24], 16) + v_end = int("0x"+value[24:32], 16) + return (v_start, v_end) + + +def calc_sizes(bricks, total): + leftover = 1 << 32 + for b in bricks: + if b.size: + b.good_size = (b.size << 32) / total + leftover -= b.good_size + else: + b.good_size = 0 + if leftover: + # Add the leftover to an old brick if we can. for b in bricks: - if b.size: - b.good_size = (b.size << 32) / total - leftover -= b.good_size - else: - b.good_size = 0 - if leftover: - # Add the leftover to an old brick if we can. - for b in bricks: - if b.good_size: - b.good_size += leftover - break - else: - # Fine, just add it wherever. - bricks[0].good_size += leftover + if b.good_size: + b.good_size += leftover + break + else: + # Fine, just add it wherever. 
+ bricks[0].good_size += leftover + # Normalization means sorting the bricks by r_start and (b) ensuring that there # are no gaps. -def normalize (in_bricks): - out_bricks = [] - curr_hash = 0 - used = 0 - while curr_hash < (1<<32): - curr_best = None - for b in in_bricks: - if b.r_start == curr_hash: - used += 1 - out_bricks.append(b) - in_bricks.remove(b) - curr_hash = b.r_end + 1 - break - else: - print("gap found at 0x%08x" % curr_hash) - sys.exit(1) - return out_bricks + in_bricks, used - -def get_score (bricks): - score = 0 - curr_hash = 0 - for b in bricks: - if not b.curr_size: - curr_hash += b.good_size - continue - new_start = curr_hash - curr_hash += b.good_size - new_end = curr_hash - 1 - if new_start > b.r_start: - max_start = new_start - else: - max_start = b.r_start - if new_end < b.r_end: - min_end = new_end - else: - min_end = b.r_end - if max_start <= min_end: - score += (min_end - max_start + 1) - return score +def normalize(in_bricks): + out_bricks = [] + curr_hash = 0 + used = 0 + while curr_hash < (1 << 32): + curr_best = None + for b in in_bricks: + if b.r_start == curr_hash: + used += 1 + out_bricks.append(b) + in_bricks.remove(b) + curr_hash = b.r_end + 1 + break + else: + print("gap found at 0x%08x" % curr_hash) + sys.exit(1) + return out_bricks + in_bricks, used -if __name__ == "__main__": - my_usage = "%prog [options] server volume [directory]" - parser = optparse.OptionParser(usage=my_usage) - parser.add_option("-f", "--free-space", dest="free_space", - default=False, action="store_true", - help="use free space instead of total space") - parser.add_option("-l", "--leave-mounted", dest="leave_mounted", - default=False, action="store_true", - help="leave subvolumes mounted") - parser.add_option("-v", "--verbose", dest="verbose", - default=False, action="store_true", - help="verbose output") - options, args = parser.parse_args() - - if len(args) == 3: - fix_dir = args[2] +def get_score(bricks): + score = 0 + curr_hash = 0 + for b in 
bricks: + if not b.curr_size: + curr_hash += b.good_size + continue + new_start = curr_hash + curr_hash += b.good_size + new_end = curr_hash - 1 + if new_start > b.r_start: + max_start = new_start else: - if len(args) != 2: - parser.print_help() - sys.exit(1) - fix_dir = None - hostname, volname = args[:2] - - # Make sure stuff gets cleaned up, even if there are exceptions. - orig_dir = os.getcwd() - work_dir = tempfile.mkdtemp() - bricks = [] - def cleanup_workdir (): - os.chdir(orig_dir) - if options.verbose: - print("Cleaning up %s" % work_dir) - for b in bricks: - subprocess.call(["umount", b.path]) - shutil.rmtree(work_dir) - if not options.leave_mounted: - atexit.register(cleanup_workdir) - os.chdir(work_dir) - - # Mount each brick individually, so we can issue brick-specific calls. - if options.verbose: - print("Mounting subvolumes...") - index = 0 - volfile_pipe = get_bricks(hostname, volname) - all_xlators, last_xlator = volfilter.load(volfile_pipe) - for dht_vol in all_xlators.itervalues(): - if dht_vol.type == "cluster/distribute": - break + max_start = b.r_start + if new_end < b.r_end: + min_end = new_end else: - print("no DHT volume found") - sys.exit(1) - for sv in dht_vol.subvols: - #print "found subvol %s" % sv.name - lpath = "%s/brick%s" % (work_dir, index) - index += 1 - mount_brick(lpath, all_xlators, sv) - bricks.append(Brick(lpath, sv.name)) - if index == 0: - print("no bricks") - sys.exit(1) + min_end = b.r_end + if max_start <= min_end: + score += (min_end - max_start + 1) + return score + + +if __name__ == "__main__": + my_usage = "%prog [options] server volume [directory]" + parser = optparse.OptionParser(usage=my_usage) + parser.add_option("-f", "--free-space", dest="free_space", + default=False, action="store_true", + help="use free space instead of total space") + parser.add_option("-l", "--leave-mounted", dest="leave_mounted", + default=False, action="store_true", + help="leave subvolumes mounted") + parser.add_option("-v", "--verbose", 
dest="verbose", + default=False, action="store_true", + help="verbose output") + options, args = parser.parse_args() - # Collect all of the sizes. + if len(args) == 3: + fix_dir = args[2] + else: + if len(args) != 2: + parser.print_help() + sys.exit(1) + fix_dir = None + hostname, volname = args[:2] + + # Make sure stuff gets cleaned up, even if there are exceptions. + orig_dir = os.getcwd() + work_dir = tempfile.mkdtemp() + bricks = [] + + def cleanup_workdir(): + os.chdir(orig_dir) if options.verbose: - print("Collecting information...") - total = 0 - for b in bricks: - info = os.statvfs(b.path) - # On FreeBSD f_bsize (info[0]) contains the optimal I/O size, - # not the block size as it's found on Linux. In this case we - # use f_frsize (info[1]). - if platform.system() == 'FreeBSD': - bsize = info[1] - else: - bsize = info[0] - # We want a standard unit even if different bricks use - # different block sizes. The size is chosen to avoid overflows - # for very large bricks with very small block sizes, but also - # accommodate filesystems which use very large block sizes to - # cheat on benchmarks. - blocksper100mb = 104857600 / bsize - if options.free_space: - size = info[3] / blocksper100mb - else: - size = info[2] / blocksper100mb - if size <= 0: - print("brick %s has invalid size %d" % (b.path, size)) - sys.exit(1) - b.set_size(size) - total += size - - # Collect all of the layout information. + print("Cleaning up %s" % work_dir) for b in bricks: - hash_range = get_range(b.path) - if hash_range is not None: - rs, re = hash_range - if rs > re: - print("%s has backwards hash range" % b.path) - sys.exit(1) - b.set_range(hash_range[0], hash_range[1]) + subprocess.call(["umount", b.path]) + shutil.rmtree(work_dir) + if not options.leave_mounted: + atexit.register(cleanup_workdir) + os.chdir(work_dir) - if options.verbose: - print("Calculating new layouts...") - calc_sizes(bricks, total) - bricks, used = normalize(bricks) - - # We can't afford O(n!) 
here, but O(n^2) should be OK and the result - # should be almost as good. - while used < len(bricks): - best_place = used - best_score = get_score(bricks) - for i in range(used): - new_bricks = bricks[:] - del new_bricks[used] - new_bricks.insert(i, bricks[used]) - new_score = get_score(new_bricks) - if new_score > best_score: - best_place = i - best_score = new_score - if best_place != used: - nb = bricks[used] - del bricks[used] - bricks.insert(best_place, nb) - used += 1 + # Mount each brick individually, so we can issue brick-specific calls. + if options.verbose: + print("Mounting subvolumes...") + index = 0 + volfile_pipe = get_bricks(hostname, volname) + all_xlators, last_xlator = volfilter.load(volfile_pipe) + for dht_vol in all_xlators.itervalues(): + if dht_vol.type == "cluster/distribute": + break + else: + print("no DHT volume found") + sys.exit(1) + for sv in dht_vol.subvols: + # print "found subvol %s" % sv.name + lpath = "%s/brick%s" % (work_dir, index) + index += 1 + mount_brick(lpath, all_xlators, sv) + bricks.append(Brick(lpath, sv.name)) + if index == 0: + print("no bricks") + sys.exit(1) - # Finalize whatever we decided on. - curr_hash = 0 - for b in bricks: - b.r_start = curr_hash - curr_hash += b.good_size - b.r_end = curr_hash - 1 + # Collect all of the sizes. + if options.verbose: + print("Collecting information...") + total = 0 + for b in bricks: + info = os.statvfs(b.path) + # On FreeBSD f_bsize (info[0]) contains the optimal I/O size, + # not the block size as it's found on Linux. In this case we + # use f_frsize (info[1]). + if platform.system() == 'FreeBSD': + bsize = info[1] + else: + bsize = info[0] + # We want a standard unit even if different bricks use + # different block sizes. The size is chosen to avoid overflows + # for very large bricks with very small block sizes, but also + # accommodate filesystems which use very large block sizes to + # cheat on benchmarks. 
+ blocksper100mb = 104857600 / bsize + if options.free_space: + size = info[3] / blocksper100mb + else: + size = info[2] / blocksper100mb + if size <= 0: + print("brick %s has invalid size %d" % (b.path, size)) + sys.exit(1) + b.set_size(size) + total += size + + # Collect all of the layout information. + for b in bricks: + hash_range = get_range(b.path) + if hash_range is not None: + rs, re = hash_range + if rs > re: + print("%s has backwards hash range" % b.path) + sys.exit(1) + b.set_range(hash_range[0], hash_range[1]) + + if options.verbose: + print("Calculating new layouts...") + calc_sizes(bricks, total) + bricks, used = normalize(bricks) - print("Here are the xattr values for your size-weighted layout:") + # We can't afford O(n!) here, but O(n^2) should be OK and the result + # should be almost as good. + while used < len(bricks): + best_place = used + best_score = get_score(bricks) + for i in range(used): + new_bricks = bricks[:] + del new_bricks[used] + new_bricks.insert(i, bricks[used]) + new_score = get_score(new_bricks) + if new_score > best_score: + best_place = i + best_score = new_score + if best_place != used: + nb = bricks[used] + del bricks[used] + bricks.insert(best_place, nb) + used += 1 + + # Finalize whatever we decided on. 
+ curr_hash = 0 + for b in bricks: + b.r_start = curr_hash + curr_hash += b.good_size + b.r_end = curr_hash - 1 + + print("Here are the xattr values for your size-weighted layout:") + for b in bricks: + print(" %s: 0x0000000200000000%08x%08x" % ( + b.sv_name, b.r_start, b.r_end)) + + if fix_dir: + if options.verbose: + print("Fixing layout for %s" % fix_dir) for b in bricks: - print(" %s: 0x0000000200000000%08x%08x" % ( - b.sv_name, b.r_start, b.r_end)) - - if fix_dir: - if options.verbose: - print("Fixing layout for %s" % fix_dir) - for b in bricks: - value = "0x0000000200000000%08x%08x" % ( - b.r_start, b.r_end) - path = "%s/%s" % (b.path, fix_dir) - cmd = "setfattr -n trusted.glusterfs.dht -v %s %s" % ( - value, path) - print(cmd) - - if options.leave_mounted: - print("The following subvolumes are still mounted:") - for b in bricks: - print("%s on %s" % (b.sv_name, b.path)) - print("Don't forget to clean up when you're done.") + value = "0x0000000200000000%08x%08x" % ( + b.r_start, b.r_end) + path = "%s/%s" % (b.path, fix_dir) + cmd = "setfattr -n trusted.glusterfs.dht -v %s %s" % ( + value, path) + print(cmd) + if options.leave_mounted: + print("The following subvolumes are still mounted:") + for b in bricks: + print("%s on %s" % (b.sv_name, b.path)) + print("Don't forget to clean up when you're done.") From d4352086ab75347267b90a04dbb2ab81a3a9aca3 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 18:53:20 -0300 Subject: [PATCH 48/72] python linter: clean extras/gnfs-loganalyse.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/gnfs-loganalyse.py | 323 +++++++++++++++++++++++----------------------- # 1 file changed, 165 insertions(+), 158 deletions(-) # extras/gnfs-loganalyse.py | 323 +++++++++++++++++++++++----------------------- # 1 file changed, 165 insertions(+), 158 deletions(-) --- extras/gnfs-loganalyse.py | 323 +++++++++++++++++++------------------- 1 file changed, 165 insertions(+), 158 deletions(-) diff --git a/extras/gnfs-loganalyse.py b/extras/gnfs-loganalyse.py index 6341d007188..9023e5f804e 100755 --- a/extras/gnfs-loganalyse.py +++ b/extras/gnfs-loganalyse.py @@ -11,156 +11,163 @@ """ from __future__ import print_function + import os import string import sys class NFSRequest: - def requestIsEntryOp (self): - op = self.op - if op == "CREATE" or op == "LOOKUP" or op == "REMOVE" or op == "LINK" or op == "RENAME" or op == "MKDIR" or op == "RMDIR" or op == "SYMLINK" or op == "MKNOD": - return 1 - else: - return 0 - - def __init__ (self, logline, linecount): - self.calllinecount = 0 - self.xid = "" - self.op = "" - self.opdata = "" - self.replydata = "" - self.replylinecount = 0 - self.timestamp = "" - self.entryname = "" - self.gfid = "" - self.replygfid = "" - - tokens = logline.strip ().split (" ") - self.timestamp = tokens[0] + " " + tokens[1] - if "XID:" not in tokens: - return None - - if "args:" not in tokens: - return None - - self.calllinecount = linecount - - xididx = tokens.index ("XID:") - self.xid = tokens [xididx+1].strip(",") - - opidx = tokens.index ("args:") - self.op = tokens [opidx-1].strip (":") - self.opdata = " ".join(tokens [opidx+1:]) - if self.requestIsEntryOp (): - nameidx = tokens.index ("name:") - self.entryname = tokens[nameidx + 1].strip (",") - gfididx = tokens.index ("gfid") - self.gfid = tokens[gfididx +1].strip(",") - - - def getXID (self): - return self.xid - - def setReply (self, logline, linecount): - tokens = logline.strip ().split (" ") - timestamp = tokens[0] + " " + tokens[1] - 
statidx = tokens.index ("NFS:") - self.replydata = " TimeStamp: " + timestamp + " " + " ".join (tokens [statidx+1:]) - self.replylinecount = linecount - if "gfid" in tokens: - gfididx = tokens.index ("gfid") - self.replygfid = tokens [gfididx + 1].strip(",") - - def dump (self): - print("ReqLine: " + str(self.calllinecount) + " TimeStamp: " + self.timestamp + ", XID: " + self.xid + " " + self.op + " ARGS: " + self.opdata + " RepLine: " + str(self.replylinecount) + " " + self.replydata) + def requestIsEntryOp(self): + op = self.op + if op == "CREATE" \ + or op == "LOOKUP" \ + or op == "REMOVE" \ + or op == "LINK" \ + or op == "RENAME" \ + or op == "MKDIR" \ + or op == "RMDIR" \ + or op == "SYMLINK" \ + or op == "MKNOD": + return 1 + else: + return 0 + + def __init__(self, logline, linecount): + self.calllinecount = 0 + self.xid = "" + self.op = "" + self.opdata = "" + self.replydata = "" + self.replylinecount = 0 + self.timestamp = "" + self.entryname = "" + self.gfid = "" + self.replygfid = "" + + tokens = logline.strip().split(" ") + self.timestamp = tokens[0] + " " + tokens[1] + if "XID:" not in tokens: + return None + + if "args:" not in tokens: + return None + + self.calllinecount = linecount + + xididx = tokens.index("XID:") + self.xid = tokens[xididx+1].strip(",") + + opidx = tokens.index("args:") + self.op = tokens[opidx-1].strip(":") + self.opdata = " ".join(tokens[opidx+1:]) + if self.requestIsEntryOp(): + nameidx = tokens.index("name:") + self.entryname = tokens[nameidx + 1].strip(",") + gfididx = tokens.index("gfid") + self.gfid = tokens[gfididx + 1].strip(",") + + def getXID(self): + return self.xid + + def setReply(self, logline, linecount): + tokens = logline.strip().split(" ") + timestamp = tokens[0] + " " + tokens[1] + statidx = tokens.index("NFS:") + self.replydata = " TimeStamp: " + timestamp + " " + " ".join(tokens[statidx + 1:]) + self.replylinecount = linecount + if "gfid" in tokens: + gfididx = tokens.index("gfid") + self.replygfid = 
tokens[gfididx + 1].strip(",") + + def dump(self): + print("ReqLine: " + str(self.calllinecount) + " TimeStamp: " + self.timestamp + ", XID: " + self.xid + " " + self.op + " ARGS: " + self.opdata + " RepLine: " + str(self.replylinecount) + " " + self.replydata) -class NFSLogAnalyzer: - def __init__ (self, optn, trackfilename, tracknamefh, stats): - self.stats = stats - self.xid_request_map = {} - self.orphan_replies = {} - self.rqlist = [] - self.CALL = 1 - self.REPLY = 2 - self.optn = optn - self.trackfilename = trackfilename - self.tracknamefh = tracknamefh - self.trackedfilehandles = [] - - def handle_call_line (self, logline, linecount): - newreq = NFSRequest (logline, linecount) - xid = newreq.getXID () - if (self.optn == SYNTHESIZE): - self.xid_request_map [xid] = newreq +class NFSLogAnalyzer: + def __init__(self, optn, trackfilename, tracknamefh, stats): + self.stats = stats + self.xid_request_map = {} + self.orphan_replies = {} + self.rqlist = [] + self.CALL = 1 + self.REPLY = 2 + self.optn = optn + self.trackfilename = trackfilename + self.tracknamefh = tracknamefh + self.trackedfilehandles = [] + + def handle_call_line(self, logline, linecount): + newreq = NFSRequest(logline, linecount) + xid = newreq.getXID() + if (self.optn == SYNTHESIZE): + self.xid_request_map[xid] = newreq + self.rqlist.append(newreq) + elif self.optn == TRACKFILENAME: + if newreq.requestIsEntryOp(): + if newreq.entryname == self.trackfilename: + self.xid_request_map[xid] = newreq + self.rqlist.append(newreq) + else: + del newreq + elif self.tracknamefh == ENABLE_TRACKNAME_FH: + if len(self.trackedfilehandles) > 0: + if newreq.gfid in self.trackedfilehandles: + self.xid_request_map[xid] = newreq self.rqlist.append(newreq) - elif self.optn == TRACKFILENAME: - if newreq.requestIsEntryOp(): - if newreq.entryname == self.trackfilename: - self.xid_request_map [xid] = newreq - self.rqlist.append(newreq) - else: - del newreq - elif self.tracknamefh == ENABLE_TRACKNAME_FH: - if len 
(self.trackedfilehandles) > 0: - if newreq.gfid in self.trackedfilehandles: - self.xid_request_map [xid] = newreq - self.rqlist.append(newreq) - else: - del newreq - else: - del newreq - else: - del newreq - - - def handle_reply_line (self, logline, linecount): - tokens = logline.strip ().split (" ") - - xididx = tokens.index ("XID:") - xid = tokens [xididx + 1].strip(",") - if xid not in self.xid_request_map.keys (): - self.orphan_replies [xid] = logline + else: + del newreq else: - rq = self.xid_request_map [xid] - rq.setReply (logline, linecount) - if rq.requestIsEntryOp() and rq.entryname == self.trackfilename: - self.trackedfilehandles.append (rq.replygfid) - - def analyzeLine (self, logline, linecount): - tokens = logline.strip ().split (" ") - msgtype = 0 - - if "XID:" not in tokens: - return - - if "args:" in tokens: - msgtype = self.CALL - elif "NFS:" in tokens: - msgtype = self.REPLY - - if msgtype == self.CALL: - self.handle_call_line (logline, linecount) - elif msgtype == self.REPLY: - self.handle_reply_line (logline, linecount) - - def getStats (self): - if self.stats == 0: - return - rcount = len (self.xid_request_map.keys ()) - orphancount = len (self.orphan_replies.keys ()) - print("Requests: " + str(rcount) + ", Orphans: " + str(orphancount)) - - def dump (self): - self.getStats () - for rq in self.rqlist: - rq.dump () - del rq - - self.rqlist = [] - self.orphan_replies = {} - self.xid_request_map = {} + del newreq + else: + del newreq + + def handle_reply_line(self, logline, linecount): + tokens = logline.strip().split(" ") + + xididx = tokens.index("XID:") + xid = tokens[xididx + 1].strip(",") + if xid not in self.xid_request_map.keys(): + self.orphan_replies[xid] = logline + else: + rq = self.xid_request_map[xid] + rq.setReply(logline, linecount) + if rq.requestIsEntryOp() and rq.entryname == self.trackfilename: + self.trackedfilehandles.append(rq.replygfid) + + def analyzeLine(self, logline, linecount): + tokens = logline.strip().split(" ") + 
msgtype = 0 + + if "XID:" not in tokens: + return + + if "args:" in tokens: + msgtype = self.CALL + elif "NFS:" in tokens: + msgtype = self.REPLY + + if msgtype == self.CALL: + self.handle_call_line(logline, linecount) + elif msgtype == self.REPLY: + self.handle_reply_line(logline, linecount) + + def getStats(self): + if self.stats == 0: + return + rcount = len(self.xid_request_map.keys()) + orphancount = len(self.orphan_replies.keys()) + print("Requests: " + str(rcount) + ", Orphans: " + str(orphancount)) + + def dump(self): + self.getStats() + for rq in self.rqlist: + rq.dump() + del rq + + self.rqlist = [] + self.orphan_replies = {} + self.xid_request_map = {} linecount = 0 @@ -192,8 +199,8 @@ def dump (self): USAGE: --progress """ if "--progress" in sys.argv: - idx = sys.argv.index ("--progress") - progmsgcount = int(sys.argv[idx+1]) + idx = sys.argv.index("--progress") + progmsgcount = int(sys.argv[idx+1]) """ The replies for a NFS request can be separated by hundreds and even thousands @@ -210,8 +217,8 @@ def dump (self): USAGE: --dump """ if "--dump" in sys.argv: - idx = sys.argv.index ("--dump") - dumpinterval = int(sys.argv[idx+1]) + idx = sys.argv.index("--dump") + dumpinterval = int(sys.argv[idx+1]) """ The default operation of the script is to output all the requests mapped to @@ -223,9 +230,9 @@ def dump (self): USAGE: --trackfilename """ if "--trackfilename" in sys.argv: - idx = sys.argv.index ("--trackfilename") - trackfilename = sys.argv[idx + 1] - operation = TRACKFILENAME + idx = sys.argv.index("--trackfilename") + trackfilename = sys.argv[idx + 1] + operation = TRACKFILENAME """ At every dump interval, some stats are printed about the dumped lines. @@ -233,7 +240,7 @@ def dump (self): output. 
""" if "--nostats" in sys.argv: - stats = DISABLESTATS + stats = DISABLESTATS """ While tracking a file using --trackfilename, we're only given those @@ -246,16 +253,16 @@ def dump (self): USAGE: --trackfilename """ if "--tracknamefh" in sys.argv: - tracknamefh = ENABLE_TRACKNAME_FH + tracknamefh = ENABLE_TRACKNAME_FH -la = NFSLogAnalyzer (operation, trackfilename, tracknamefh, stats) +la = NFSLogAnalyzer(operation, trackfilename, tracknamefh, stats) for line in sys.stdin: - linecount = linecount + 1 - if linecount % dumpinterval == 0: - sys.stderr.write ("Dumping data..\n") - la.dump () - - if linecount % progmsgcount == 0: - sys.stderr.write ("Integrating line: "+ str(linecount) + "\n") - la.analyzeLine (line, linecount) + linecount = linecount + 1 + if linecount % dumpinterval == 0: + sys.stderr.write("Dumping data..\n") + la.dump() + + if linecount % progmsgcount == 0: + sys.stderr.write("Integrating line: " + str(linecount) + "\n") + la.analyzeLine(line, linecount) From 43bb32647fdeff4d9e2356a0d1df84cc28703c92 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:50:12 -0300 Subject: [PATCH 49/72] python linter: clean extras/glusterfs-georep-upgrade.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/glusterfs-georep-upgrade.py | 41 ++++++++++++++++++++++---------------- # 1 file changed, 24 insertions(+), 17 deletions(-) # extras/glusterfs-georep-upgrade.py | 41 ++++++++++++++++++++++---------------- # 1 file changed, 24 insertions(+), 17 deletions(-) --- extras/glusterfs-georep-upgrade.py | 41 +++++++++++++++++------------- 1 file changed, 24 insertions(+), 17 deletions(-) diff --git a/extras/glusterfs-georep-upgrade.py b/extras/glusterfs-georep-upgrade.py index 0a6f1740727..f0893f5e66e 100755 --- a/extras/glusterfs-georep-upgrade.py +++ b/extras/glusterfs-georep-upgrade.py @@ -13,20 +13,23 @@ import argparse import errno -import os, sys +import os import shutil +import sys from datetime import datetime + def find_htime_path(brick_path): dirs = [] htime_dir = os.path.join(brick_path, '.glusterfs/changelogs/htime') for file in os.listdir(htime_dir): - if os.path.isfile(os.path.join(htime_dir,file)) and file.startswith("HTIME"): + if os.path.isfile(os.path.join(htime_dir, file)) and file.startswith("HTIME"): dirs.append(os.path.join(htime_dir, file)) else: raise FileNotFoundError("%s unavailable" % (os.path.join(htime_dir, file))) return dirs + def modify_htime_file(brick_path): htime_file_path_list = find_htime_path(brick_path) @@ -34,24 +37,25 @@ def modify_htime_file(brick_path): changelog_path = os.path.join(brick_path, '.glusterfs/changelogs') temp_htime_path = os.path.join(changelog_path, 'htime/temp_htime_file') with open(htime_file_path, 'r') as htime_file, open(temp_htime_path, 'w') as temp_htime_file: - #extract epoch times from htime file + # extract epoch times from htime file paths = htime_file.read().split("\x00") for pth in paths: epoch_no = pth.split(".")[-1] changelog = os.path.basename(pth) - #convert epoch time to year, month and day + # convert epoch time to year, month and day if epoch_no != '': - date=(datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d")) - #update 
paths in temp htime file + date = (datetime.fromtimestamp(float(int(epoch_no))).strftime("%Y/%m/%d")) + # update paths in temp htime file temp_htime_file.write("%s/%s/%s\x00" % (changelog_path, date, changelog)) - #create directory in the format year/month/days + # create directory in the format year/month/days path = os.path.join(changelog_path, date) if changelog.startswith("CHANGELOG."): - os.makedirs(path, mode = 0o600, exist_ok = True) + os.makedirs(path, mode=0o600, exist_ok=True) - #copy existing changelogs to new directory structure, delete old changelog files + # copy existing changelogs to new directory structure, + # delete old changelog files try: shutil.copyfile(pth, os.path.join(path, changelog)) except shutil.SameFileError: @@ -59,17 +63,20 @@ def modify_htime_file(brick_path): else: os.remove(pth) - #rename temp_htime_file with htime file - os.rename(htime_file_path, os.path.join('%s.bak'%htime_file_path)) + # rename temp_htime_file with htime file + os.rename(htime_file_path, os.path.join('%s.bak' % htime_file_path)) os.rename(temp_htime_path, htime_file_path) + if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('brick_path', help="This upgrade script, which is to be run on\ - server side, takes brick path as the argument, \ - updates paths inside htime file and alters the directory structure \ - above the changelog files inorder to support new optimised format \ - of the directory structure as per \ - https://review.gluster.org/#/c/glusterfs/+/23733/") + parser.add_argument('brick_path', + help="This upgrade script, which is to be run on\ + server side, takes brick path as the argument, \ + updates paths inside htime file and alters the \ + directory structure above the changelog files in \ + order to support new optimised format of the \ + directory structure as per \ + https://review.gluster.org/#/c/glusterfs/+/23733/") args = parser.parse_args() modify_htime_file(args.brick_path) From 
b252000a7155958f326abbe2b7aabecff836a13e Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:04:53 -0300 Subject: [PATCH 50/72] python linter: clean extras/git-branch-diff.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/git-branch-diff.py | 170 +++++++++++++++++++++++----------------------- # 1 file changed, 85 insertions(+), 85 deletions(-) # extras/git-branch-diff.py | 170 +++++++++++++++++++++++----------------------- # 1 file changed, 85 insertions(+), 85 deletions(-) --- extras/git-branch-diff.py | 170 +++++++++++++++++++------------------- 1 file changed, 85 insertions(+), 85 deletions(-) diff --git a/extras/git-branch-diff.py b/extras/git-branch-diff.py index 382513e069e..4e6d821a76f 100755 --- a/extras/git-branch-diff.py +++ b/extras/git-branch-diff.py @@ -1,4 +1,4 @@ -#!/bin/python2 +#!/bin/python2 """ Copyright (c) 2016 Red Hat, Inc. @@ -76,17 +76,20 @@ """ from __future__ import print_function + +import argparse import os +import subprocess import sys -import argparse + import commands -import subprocess import requests + class GitBranchDiff: - def __init__ (self): - " color symbols" - self.tick = u'\033[1;32m[ \u2714 ]\033[0m' + def __init__(self): + "Color symbols." 
+ self.tick = u'\033[1;32m[ \u2714 ]\033[0m' self.cross = u'\033[1;31m[ \u2716 ]\033[0m' self.green_set = u'\033[1;34m' self.yello_set = u'\033[4;33m' @@ -94,26 +97,26 @@ def __init__ (self): self.parse_cmd_args() - " replace default values with actual values from command args" + # replace default values with actual values from command args self.g_author = self.argsdict['author'] - self.s_pattern = self.argsdict['source'] - self.t_pattern = self.argsdict['target'] - self.r_path = self.argsdict['path'] - self.options = ' '.join(self.argsdict['options']) + self.s_pattern = self.argsdict['source'] + self.t_pattern = self.argsdict['target'] + self.r_path = self.argsdict['path'] + self.options = ' '.join(self.argsdict['options']) self.gerrit_server = "http://review.gluster.org" - def check_dir_exist (self, os_path): - " checks whether given path exist" + def check_dir_exist(self, os_path): + "Check whether given path exist." path_list = os_path.split() for path in path_list: if not os.path.exists(path): raise argparse.ArgumentTypeError("'%s' path %s is not valid" - %(os_path, path)) + % (os_path, path)) return os_path - def check_pattern_exist (self): - " defend to check given branch[s] exit" + def check_pattern_exist(self): + "Defend to check given branch[s] exit." status_sbr, op = commands.getstatusoutput('git log ' + self.s_pattern) status_tbr, op = commands.getstatusoutput('git log ' + @@ -127,8 +130,8 @@ def check_pattern_exist (self): self.parser.print_help() exit(status_tbr) - def check_author_exist (self): - " defend to check given author exist, format in case of multiple" + def check_author_exist(self): + "Defend to check given author exist, format in case of multiple." 
contrib_list = ['', '*', 'all', 'All', 'ALL', 'null', 'Null', 'NULL'] if self.g_author in contrib_list: self.g_author = "" @@ -136,118 +139,115 @@ def check_author_exist (self): ide_list = self.g_author.split(',') for ide in ide_list: cmd4 = 'git log ' + self.s_pattern + ' --author=' + ide - c_list = subprocess.check_output(cmd4, shell = True) + c_list = subprocess.check_output(cmd4, shell=True) if len(c_list) is 0: - print("Error: --author=%s doesn't exit" %self.g_author) - print("see '%s --help'" %__file__) + print("Error: --author=%s doesn't exit" % self.g_author) + print("see '%s --help'" % __file__) exit(1) if len(ide_list) > 1: self.g_author = "\|".join(ide_list) - def connected_to_gerrit (self): - "check if gerrit server is reachable" + def connected_to_gerrit(self): + "Check if gerrit server is reachable." try: r = requests.get(self.gerrit_server, timeout=3) return True except requests.Timeout as err: - " request timed out" - print("Warning: failed to get list of open review commits on " \ - "gerrit.\n" \ - "hint: Request timed out! gerrit server could possibly " \ - "slow ...\n") + # request timed out + print("Warning: failed to get list of open review commits on gerrit.\n" + "hint: Request timed out! gerrit server could possibly slow ...\n") return False except requests.RequestException as err: - " handle other errors" - print("Warning: failed to get list of open review commits on " \ - "gerrit\n" \ + # handle other errors + print("Warning: failed to get list of open review commits on gerrit\n" "hint: check with internet connection ...\n") return False - def parse_cmd_args (self): - " command line parser" + def parse_cmd_args(self): + "Parse command line." 
author = subprocess.check_output('git config user.email', - shell = True).rstrip('\n') + shell=True).rstrip('\n') source = "remotes/origin/master" - options = [' --pretty=format:"%h %s" '] + options = [' --pretty=format:"%h %s" '] path = subprocess.check_output('git rev-parse --show-toplevel', - shell = True).rstrip('\n') - self.parser = argparse.ArgumentParser(description = 'git wrapper to ' + shell=True).rstrip('\n') + self.parser = argparse.ArgumentParser(description='git wrapper to ' 'diff local or remote branches/' 'tags/commit-ranges') self.parser.add_argument('-s', '--source', - help = 'source pattern, it could be a branch,' - ' tag or a commit range', - default = source, - dest = 'source') + help='source pattern, it could be a branch,' + ' tag or a commit range', + default=source, + dest='source') self.parser.add_argument('-t', '--target', - help = 'target pattern, it could be a branch,' - ' tag or a commit range', - required = True, - dest = 'target') + help='target pattern, it could be a branch,' + ' tag or a commit range', + required=True, + dest='target') self.parser.add_argument('-a', '--author', - help = 'default: git config name/email, ' - 'to provide multiple specify comma' - ' separated values', - default = author, - dest = 'author') + help='default: git config name/email, ' + 'to provide multiple specify comma' + ' separated values', + default=author, + dest='author') self.parser.add_argument('-p', '--path', - type = self.check_dir_exist, - help = 'show source and target diff w.r.t ' - 'given path, to provide multiple ' - 'specify space in between them', - default = path, - dest = 'path') + type=self.check_dir_exist, + help='show source and target diff w.r.t ' + 'given path, to provide multiple ' + 'specify space in between them', + default=path, + dest='path') self.parser.add_argument('-o', '--options', - help = 'add other git options such as ' - '--after=<>, --before=<> etc. 
' - 'experts use;', - default = options, - dest = 'options', + help='add other git options such as ' + '--after=<>, --before=<> etc. ' + 'experts use;', + default=options, + dest='options', action='append') self.argsdict = vars(self.parser.parse_args()) - def print_output (self): - " display the result list" + def print_output(self): + "Display the result list." print("\n------------------------------------------------------------\n") print(self.tick + " Successfully Backported changes:") - print(' {' + 'from: ' + self.s_pattern + \ - ' to: '+ self.t_pattern + '}\n') + print(' {' + 'from: ' + self.s_pattern + + ' to: ' + self.t_pattern + '}\n') for key, value in self.s_dict.items(): if value in self.t_dict.itervalues(): - print("[%s%s%s] %s" %(self.yello_set, - key, - self.color_unset, - value)) + print("[%s%s%s] %s" % (self.yello_set, + key, + self.color_unset, + value)) print("\n------------------------------------------------------------\n") print(self.cross + " Missing patches in " + self.t_pattern + ':\n') if self.connected_to_gerrit(): cmd3 = "git review -r origin -l" - review_list = subprocess.check_output(cmd3, shell = True).split('\n') + review_list = subprocess.check_output(cmd3, shell=True).split('\n') else: review_list = [] for key, value in self.s_dict.items(): if value not in self.t_dict.itervalues(): if any(value in s for s in review_list): - print("[%s%s%s] %s %s(under review)%s" %(self.yello_set, - key, - self.color_unset, - value, - self.green_set, - self.color_unset)) + print("[%s%s%s] %s %s(under review)%s" % (self.yello_set, + key, + self.color_unset, + value, + self.green_set, + self.color_unset)) else: - print("[%s%s%s] %s" %(self.yello_set, - key, - self.color_unset, - value)) + print("[%s%s%s] %s" % (self.yello_set, + key, + self.color_unset, + value)) print("\n------------------------------------------------------------\n") - def main (self): + def main(self): self.check_pattern_exist() self.check_author_exist() @@ -259,12 +259,12 @@ def 
main (self): cmd2 = 'git log' + self.options + ' ' + self.t_pattern + \ ' ' + self.r_path - s_list = subprocess.check_output(cmd1, shell = True).split('\n') - t_list = subprocess.check_output(cmd2, shell = True) + s_list = subprocess.check_output(cmd1, shell=True).split('\n') + t_list = subprocess.check_output(cmd2, shell=True) if len(t_list) is 0: - print("No commits in the target: %s" %self.t_pattern) - print("see '%s --help'" %__file__) + print("No commits in the target: %s" % self.t_pattern) + print("see '%s --help'" % __file__) exit() else: t_list = t_list.split('\n') From 8dd25fd67f4d7e0be6038e0f2ec959880713f48a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:51:24 -0300 Subject: [PATCH 51/72] python linter: clean extras/distributed-testing/distributed-test-runner.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # .../distributed-testing/distributed-test-runner.py | 34 ++++++++++++---------- # 1 file changed, 18 insertions(+), 16 deletions(-) # .../distributed-testing/distributed-test-runner.py | 34 ++++++++++++---------- # 1 file changed, 18 insertions(+), 16 deletions(-) --- .../distributed-test-runner.py | 34 ++++++++++--------- 1 file changed, 18 insertions(+), 16 deletions(-) diff --git a/extras/distributed-testing/distributed-test-runner.py b/extras/distributed-testing/distributed-test-runner.py index 5a07e2feab1..10d6d3d5827 100755 --- a/extras/distributed-testing/distributed-test-runner.py +++ b/extras/distributed-testing/distributed-test-runner.py @@ -2,24 +2,25 @@ from __future__ import absolute_import from __future__ import division -from __future__ import unicode_literals from __future__ import print_function -import re -import sys -import fcntl +from __future__ import unicode_literals + +import argparse import base64 -import threading -import socket +import fcntl import os +import re import shlex -import argparse +import socket import subprocess +import sys +import threading import time +import uuid +import httplib +import md5 import SimpleXMLRPCServer import xmlrpclib -import md5 -import httplib -import uuid DEFAULT_PORT = 9999 TEST_TIMEOUT_S = 15 * 60 @@ -193,12 +194,12 @@ def call(self, cmd): def ssh(self, hostname, cmd, id_rsa=None): flags = "" if not id_rsa else "-i " + id_rsa return self.call("timeout %s ssh %s root@%s \"%s\"" % - (SSH_TIMEOUT_S, flags, hostname, cmd)) + (SSH_TIMEOUT_S, flags, hostname, cmd)) def scp(self, hostname, src, dest, id_rsa=None): flags = "" if not id_rsa else "-i " + id_rsa return self.call("timeout %s scp %s %s root@%s:%s" % - (SSH_TIMEOUT_S, flags, src, hostname, dest)) + (SSH_TIMEOUT_S, flags, src, hostname, dest)) def output(self, cmd, cwd=None): Log.debug("%s> %s" % (cwd, cmd)) @@ -374,7 +375,8 @@ def build(self, id, asan=False): self.shell.call("make clean") env = 
"ASAN_ENABLED=1" if asan else "" return self.shell.call( - "%s ./extras/distributed-testing/distributed-test-build.sh" % env) == 0 + "%s ./extras/distributed-testing/distributed-test-build.sh" + % env) == 0 @synchronized def install(self, id): @@ -400,8 +402,8 @@ def prove(self, id, test, timeout, valgrind="no", asan_noleaks=True): else: cmd = "prove -v" - status = self.shell.call( - "%s timeout %s %s %s" % (env, timeout, cmd, test)) + status = self.shell.call("%s timeout %s %s %s" + % (env, timeout, cmd, test)) if status != 0: return (False, self._log_content()) @@ -497,7 +499,7 @@ def _copy(self): def _copy_gzip(self): Log.cli("<%s> copying and compiling %s to remote" % - (self.logid, self.path)) + (self.logid, self.path)) data = encode(get_file_content(patch_file())) Log.debug("GZIP size = %s B" % len(data)) return self.proxy.copygzip(self.cb.id, data) From 1f69defae861c8c52946227ba8e906195d15899c Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 21:51:46 -0300 Subject: [PATCH 52/72] python linter: clean extras/debug/gfcore.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/debug/gfcore.py | 6 ++++-- # 1 file changed, 4 insertions(+), 2 deletions(-) # extras/debug/gfcore.py | 6 ++++-- # 1 file changed, 4 insertions(+), 2 deletions(-) --- extras/debug/gfcore.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/extras/debug/gfcore.py b/extras/debug/gfcore.py index 9f097f0de42..d2df7e4c3db 100755 --- a/extras/debug/gfcore.py +++ b/extras/debug/gfcore.py @@ -20,6 +20,7 @@ def launch(): "-x", __file__ ]) + class GFCore(object): def __init__(self, count, base): self.count = count @@ -30,8 +31,8 @@ def __init__(self, count, base): self.cont() - def cont(self, quit = False): - if not(quit) and (self.count > 0): + def cont(self, quit=False): + if not (quit) and (self.count > 0): gdb.execute('continue') else: gdb.execute('set gf_signal_on_assert = 0') @@ -60,6 +61,7 @@ def gf_stop(self, event): self.cont(quit) + # Module 'gdb' is not available when running outside gdb. try: import gdb From b9bee847e3ce03cff433c19d79216bfbf9e29e86 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 18:51:27 -0300 Subject: [PATCH 53/72] python linter: clean extras/create_new_xlator/generate_xlator.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/create_new_xlator/generate_xlator.py | 303 ++++++++++++++-------------- # 1 file changed, 155 insertions(+), 148 deletions(-) # extras/create_new_xlator/generate_xlator.py | 303 ++++++++++++++-------------- # 1 file changed, 155 insertions(+), 148 deletions(-) --- extras/create_new_xlator/generate_xlator.py | 303 ++++++++++---------- 1 file changed, 155 insertions(+), 148 deletions(-) diff --git a/extras/create_new_xlator/generate_xlator.py b/extras/create_new_xlator/generate_xlator.py index 983868c04db..2e6e9438635 100755 --- a/extras/create_new_xlator/generate_xlator.py +++ b/extras/create_new_xlator/generate_xlator.py @@ -1,14 +1,18 @@ #!/usr/bin/python3 + from __future__ import print_function + import os import re -import sys import string +import sys import time +from generator import ops, xlator_cbks, xlator_dumpops + path = os.path.abspath(os.path.dirname(__file__)) + '/../../libglusterfs/src' sys.path.append(path) -from generator import ops, xlator_cbks, xlator_dumpops + MAKEFILE_FMT = """ xlator_LTLIBRARIES = @XL_NAME@.la @@ -29,180 +33,183 @@ def get_error_arg(type_str): - if type_str.find(" *") != -1: - return "NULL" - return "-1" + if type_str.find(" *") != -1: + return "NULL" + return "-1" def get_param(names, types): - # Convert two separate tuples to one of (name, type) sub-tuples. - as_tuples = list(zip(types, names)) - # Convert each sub-tuple into a "type name" string. - as_strings = [' '.join(item) for item in as_tuples] - # Join all of those into one big string. - return ',\n\t'.join(as_strings) + # Convert two separate tuples to one of (name, type) sub-tuples. + as_tuples = list(zip(types, names)) + # Convert each sub-tuple into a "type name" string. + as_strings = [' '.join(item) for item in as_tuples] + # Join all of those into one big string. 
+ return ',\n\t'.join(as_strings) def generate(tmpl, name, table): - w_arg_names = [a[1] for a in table[name] if a[0] == 'fop-arg'] - w_arg_types = [a[2] for a in table[name] if a[0] == 'fop-arg'] - u_arg_names = [a[1] for a in table[name] if a[0] == 'cbk-arg'] - u_arg_types = [a[2] for a in table[name] if a[0] == 'cbk-arg'] - fn_arg_names = [a[1] for a in table[name] if a[0] == 'fn-arg'] - fn_arg_types = [a[2] for a in table[name] if a[0] == 'fn-arg'] - ret_type = [a[1] for a in table[name] if a[0] == 'ret-val'] - ret_var = [a[2] for a in table[name] if a[0] == 'ret-val'] - - sdict = {} - #Parameters are (t1, var1), (t2, var2)... - #Args are (var1, var2,...) - sdict["@WIND_ARGS@"] = ', '.join(w_arg_names) - sdict["@UNWIND_ARGS@"] = ', '.join(u_arg_names) - sdict["@ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, u_arg_types))) - sdict["@WIND_PARAMS@"] = get_param(w_arg_names, w_arg_types) - sdict["@UNWIND_PARAMS@"] = get_param(u_arg_names, u_arg_types) - sdict["@FUNC_PARAMS@"] = get_param(fn_arg_names, fn_arg_types) - sdict["@NAME@"] = name - sdict["@FOP_PREFIX@"] = fop_prefix - sdict["@RET_TYPE@"] = ''.join(ret_type) - sdict["@RET_VAR@"] = ''.join(ret_var) - - for old, new in sdict.items(): - tmpl = tmpl.replace(old, new) - # TBD: reindent/reformat the result for maximum readability. - return tmpl + w_arg_names = [a[1] for a in table[name] if a[0] == 'fop-arg'] + w_arg_types = [a[2] for a in table[name] if a[0] == 'fop-arg'] + u_arg_names = [a[1] for a in table[name] if a[0] == 'cbk-arg'] + u_arg_types = [a[2] for a in table[name] if a[0] == 'cbk-arg'] + fn_arg_names = [a[1] for a in table[name] if a[0] == 'fn-arg'] + fn_arg_types = [a[2] for a in table[name] if a[0] == 'fn-arg'] + ret_type = [a[1] for a in table[name] if a[0] == 'ret-val'] + ret_var = [a[2] for a in table[name] if a[0] == 'ret-val'] + + sdict = {} + # Parameters are (t1, var1), (t2, var2)... + # Args are (var1, var2,...) 
+ sdict["@WIND_ARGS@"] = ', '.join(w_arg_names) + sdict["@UNWIND_ARGS@"] = ', '.join(u_arg_names) + sdict["@ERROR_ARGS@"] = ', '.join(list(map(get_error_arg, u_arg_types))) + sdict["@WIND_PARAMS@"] = get_param(w_arg_names, w_arg_types) + sdict["@UNWIND_PARAMS@"] = get_param(u_arg_names, u_arg_types) + sdict["@FUNC_PARAMS@"] = get_param(fn_arg_names, fn_arg_types) + sdict["@NAME@"] = name + sdict["@FOP_PREFIX@"] = fop_prefix + sdict["@RET_TYPE@"] = ''.join(ret_type) + sdict["@RET_VAR@"] = ''.join(ret_var) + + for old, new in sdict.items(): + tmpl = tmpl.replace(old, new) + # TBD: reindent/reformat the result for maximum readability. + return tmpl def gen_xlator(): - xl = open(src_dir_path+"/"+xl_name+".c", 'w+') + xl = open(src_dir_path+"/"+xl_name+".c", 'w+') - print(COPYRIGHT, file=xl) - print(fragments["INCLUDE_IN_SRC_FILE"].replace("@XL_NAME@", - xl_name), file=xl) + print(COPYRIGHT, file=xl) + print(fragments["INCLUDE_IN_SRC_FILE"].replace("@XL_NAME@", + xl_name), file=xl) - #Generate cbks and fops - for fop in ops: - print(generate(fragments["CBK_TEMPLATE"], fop, ops), file=xl) - print(generate(fragments["FOP_TEMPLATE"], fop, ops), file=xl) + # Generate cbks and fops + for fop in ops: + print(generate(fragments["CBK_TEMPLATE"], fop, ops), file=xl) + print(generate(fragments["FOP_TEMPLATE"], fop, ops), file=xl) - for cbk in xlator_cbks: - print(generate(fragments["FUNC_TEMPLATE"], cbk, - xlator_cbks), file=xl) + for cbk in xlator_cbks: + print(generate(fragments["FUNC_TEMPLATE"], cbk, + xlator_cbks), file=xl) - for dops in xlator_dumpops: - print(generate(fragments["FUNC_TEMPLATE"], dops, - xlator_dumpops), file=xl) + for dops in xlator_dumpops: + print(generate(fragments["FUNC_TEMPLATE"], dops, + xlator_dumpops), file=xl) - #Generate fop table - print("struct xlator_fops fops = {", file=xl) - for fop in ops: - print(" .{0:20} = {1}_{2},".format(fop, fop_prefix, fop), file=xl) - print("};", file=xl) + # Generate fop table + print("struct xlator_fops fops = {", 
file=xl) + for fop in ops: + print(" .{0:20} = {1}_{2},".format(fop, fop_prefix, fop), file=xl) + print("};", file=xl) - #Generate xlator_cbks table - print("struct xlator_cbks cbks = {", file=xl) - for cbk in xlator_cbks: - print(" .{0:20} = {1}_{2},".format(cbk, fop_prefix, cbk), file=xl) - print("};", file=xl) + # Generate xlator_cbks table + print("struct xlator_cbks cbks = {", file=xl) + for cbk in xlator_cbks: + print(" .{0:20} = {1}_{2},".format(cbk, fop_prefix, cbk), file=xl) + print("};", file=xl) - #Generate xlator_dumpops table - print("struct xlator_dumpops dumpops = {", file=xl) - for dops in xlator_dumpops: - print(" .{0:20} = {1}_{2},".format(dops, fop_prefix, dops), file=xl) - print("};", file=xl) + # Generate xlator_dumpops table + print("struct xlator_dumpops dumpops = {", file=xl) + for dops in xlator_dumpops: + print(" .{0:20} = {1}_{2},".format(dops, fop_prefix, dops), file=xl) + print("};", file=xl) - xlator_methods = fragments["XLATOR_METHODS"].replace("@XL_NAME@", xl_name) - xlator_methods = xlator_methods.replace("@FOP_PREFIX@", fop_prefix) - print(xlator_methods, file=xl) + xlator_methods = fragments["XLATOR_METHODS"].replace("@XL_NAME@", xl_name) + xlator_methods = xlator_methods.replace("@FOP_PREFIX@", fop_prefix) + print(xlator_methods, file=xl) - xl.close() + xl.close() def create_dir_struct(): - if not os.path.exists(dir_path+"/src"): - os.makedirs(dir_path+"/src") + if not os.path.exists(dir_path+"/src"): + os.makedirs(dir_path+"/src") def gen_header_files(): - upname = xl_name_no_hyphen.upper() - h = open(src_dir_path+"/"+xl_name+".h", 'w+') - print(COPYRIGHT, file=h) - txt = fragments["HEADER_FMT"].replace("@HFL_NAME@", upname) - txt = txt.replace("@XL_NAME@", xl_name) - print(txt, file=h) - h.close() - - h = open(src_dir_path+"/"+xl_name+"-mem-types.h", 'w+') - print(COPYRIGHT, file=h) - txt = fragments["MEM_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MEM_TYPES") - txt = txt.replace("@FOP_PREFIX@", fop_prefix) - print(txt, file=h) 
- h.close() - - h = open(src_dir_path+"/"+xl_name+"-messages.h", 'w+') - print(COPYRIGHT, file=h) - txt = fragments["MSG_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MESSAGES") - txt = txt.replace("@FOP_PREFIX@", fop_prefix.upper()) - print(txt, file=h) - h.close() + upname = xl_name_no_hyphen.upper() + h = open(src_dir_path+"/"+xl_name+".h", 'w+') + print(COPYRIGHT, file=h) + txt = fragments["HEADER_FMT"].replace("@HFL_NAME@", upname) + txt = txt.replace("@XL_NAME@", xl_name) + print(txt, file=h) + h.close() + + h = open(src_dir_path+"/"+xl_name+"-mem-types.h", 'w+') + print(COPYRIGHT, file=h) + txt = fragments["MEM_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MEM_TYPES") + txt = txt.replace("@FOP_PREFIX@", fop_prefix) + print(txt, file=h) + h.close() + + h = open(src_dir_path+"/"+xl_name+"-messages.h", 'w+') + print(COPYRIGHT, file=h) + txt = fragments["MSG_HEADER_FMT"].replace("@HFL_NAME@", upname+"_MESSAGES") + txt = txt.replace("@FOP_PREFIX@", fop_prefix.upper()) + print(txt, file=h) + h.close() def gen_makefiles(): - m = open(dir_path+"/Makefile.am", 'w+') - print("SUBDIRS = src\n\nCLEANFILES =", file=m) - m.close() - - m = open(src_dir_path+"/Makefile.am", 'w+') - txt = MAKEFILE_FMT.replace("@XL_NAME@", xl_name) - txt = txt.replace("@XL_NAME_NO_HYPHEN@", xl_name_no_hyphen) - txt = txt.replace("@XL_TYPE@", xlator_type) - print(txt, file=m) - m.close() - -def get_copyright (): - return fragments["CP"].replace("@CURRENT_YEAR@", - time.strftime("%Y")) - -def load_fragments (): - pragma_re = re.compile('pragma fragment (.*)') - cur_symbol = None - cur_value = "" - result = {} - basepath = os.path.abspath(os.path.dirname(__file__)) - fragpath = basepath + "/new-xlator.c.tmpl" - for line in open(fragpath, "r").readlines(): - m = pragma_re.search(line) - if m: - if cur_symbol: - result[cur_symbol] = cur_value - cur_symbol = m.group(1) - cur_value = "" - else: - cur_value += line - if cur_symbol: + m = open(dir_path+"/Makefile.am", 'w+') + print("SUBDIRS = 
src\n\nCLEANFILES =", file=m) + m.close() + + m = open(src_dir_path+"/Makefile.am", 'w+') + txt = MAKEFILE_FMT.replace("@XL_NAME@", xl_name) + txt = txt.replace("@XL_NAME_NO_HYPHEN@", xl_name_no_hyphen) + txt = txt.replace("@XL_TYPE@", xlator_type) + print(txt, file=m) + m.close() + + +def get_copyright(): + return fragments["CP"].replace("@CURRENT_YEAR@", + time.strftime("%Y")) + + +def load_fragments(): + pragma_re = re.compile('pragma fragment (.*)') + cur_symbol = None + cur_value = "" + result = {} + basepath = os.path.abspath(os.path.dirname(__file__)) + fragpath = basepath + "/new-xlator.c.tmpl" + for line in open(fragpath, "r").readlines(): + m = pragma_re.search(line) + if m: + if cur_symbol: result[cur_symbol] = cur_value - return result + cur_symbol = m.group(1) + cur_value = "" + else: + cur_value += line + if cur_symbol: + result[cur_symbol] = cur_value + return result -if __name__ == '__main__': - if len(sys.argv) < 3: - print("USAGE: ./gen_xlator ") - sys.exit(0) +if __name__ == '__main__': - xl_name = sys.argv[2] - xl_name_no_hyphen = xl_name.replace("-", "_") - if sys.argv[1].endswith('/'): - dir_path = sys.argv[1] + xl_name - else: - dir_path = sys.argv[1] + "/" + xl_name - xlator_type = os.path.basename(sys.argv[1]) - fop_prefix = sys.argv[3] - src_dir_path = dir_path + "/src" - - fragments = load_fragments() - - COPYRIGHT = get_copyright() - create_dir_struct() - gen_xlator() - gen_header_files() - gen_makefiles() + if len(sys.argv) < 3: + print("USAGE: ./gen_xlator ") + sys.exit(0) + + xl_name = sys.argv[2] + xl_name_no_hyphen = xl_name.replace("-", "_") + if sys.argv[1].endswith('/'): + dir_path = sys.argv[1] + xl_name + else: + dir_path = sys.argv[1] + "/" + xl_name + xlator_type = os.path.basename(sys.argv[1]) + fop_prefix = sys.argv[3] + src_dir_path = dir_path + "/src" + + fragments = load_fragments() + + COPYRIGHT = get_copyright() + create_dir_struct() + gen_xlator() + gen_header_files() + gen_makefiles() From 
e592ad9cd77ce52a465445d01ba5baf48edd623b Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:43:59 -0300 Subject: [PATCH 54/72] python linter: clean extras/cliutils/cliutils.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/cliutils/cliutils.py | 6 +++--- # 1 file changed, 3 insertions(+), 3 deletions(-) # extras/cliutils/cliutils.py | 6 +++--- # 1 file changed, 3 insertions(+), 3 deletions(-) --- extras/cliutils/cliutils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/extras/cliutils/cliutils.py b/extras/cliutils/cliutils.py index 55fbaf56704..02a5d597fe8 100644 --- a/extras/cliutils/cliutils.py +++ b/extras/cliutils/cliutils.py @@ -59,21 +59,21 @@ def oknotok(flag): def output_error(message, errcode=1): - print (message, file=sys.stderr) + print(message, file=sys.stderr) sys.exit(errcode) def node_output_ok(message=""): # Prints Success JSON output and exits with returncode zero out = {"ok": True, "nodeid": get_node_uuid(), "output": message} - print (json.dumps(out)) + print(json.dumps(out)) sys.exit(0) def node_output_notok(message): # Prints Error JSON output and exits with returncode zero out = {"ok": False, "nodeid": get_node_uuid(), "error": message} - print (json.dumps(out)) + print(json.dumps(out)) sys.exit(0) From a13c869aaec742d4497c1fde79297c2cd3bbeeca Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 23:49:39 -0300 Subject: [PATCH 55/72] python linter: clean events/src/utils.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. 
The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # events/src/utils.py | 11 +++++------ # 1 file changed, 5 insertions(+), 6 deletions(-) # events/src/utils.py | 11 +++++------ # 1 file changed, 5 insertions(+), 6 deletions(-) --- events/src/utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/events/src/utils.py b/events/src/utils.py index 9a7d8539659..738c355e06b 100644 --- a/events/src/utils.py +++ b/events/src/utils.py @@ -39,7 +39,6 @@ CERTS_DIR) from gfevents import eventtypes - # Webhooks list _webhooks = {} _webhooks_file_mtime = 0 @@ -315,11 +314,11 @@ def plugin_webhook(message): class NamedTempOpen(object): - """ - This class is used to create a temporary file which is then written to with contents. + """ Create a temporary file which is then written to with contents. + The temp file is then persisted with the give name by calling os.rename(). - This class is used to avoid the data loss or truncation in case of multiple processes - writing to the same file without the use of fcntl locks. + This class is used to avoid the data loss or truncation in case of multiple + processes writing to the same file without the use of fcntl locks. The temporary file is created in the dest dir of the file. """ @@ -353,8 +352,8 @@ def __exit__(self, ex_type, ex_val, ex_tb): else: os.rename(self.fileobj.name, self.filename) -class LockedOpen(object): +class LockedOpen(object): def __init__(self, filename, *args, **kwargs): self.filename = filename self.open_args = args From 3a595555342e5444634bedbea748d929c7327f43 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:25:05 -0300 Subject: [PATCH 56/72] python linter: clean events/src/peer_eventsapi.py This series aim to reduce the python linter build logs. 
This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # events/src/peer_eventsapi.py | 50 +++++++++++++++++++++----------------------- # 1 file changed, 24 insertions(+), 26 deletions(-) # events/src/peer_eventsapi.py | 50 +++++++++++++++++++++----------------------- # 1 file changed, 24 insertions(+), 26 deletions(-) --- events/src/peer_eventsapi.py | 50 +++++++++++++++++------------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/events/src/peer_eventsapi.py b/events/src/peer_eventsapi.py index 90f93c1b548..4e4c5c2ea30 100644 --- a/events/src/peer_eventsapi.py +++ b/events/src/peer_eventsapi.py @@ -11,24 +11,22 @@ # from __future__ import print_function -import os -import json -from errno import EEXIST + import fcntl -from errno import EACCES, EAGAIN +import json +import os import signal import sys import time +from errno import EACCES, EAGAIN, EEXIST import requests from prettytable import PrettyTable - from gluster.cliutils import (Cmd, node_output_ok, node_output_notok, sync_file_to_peers, GlusterCmdException, output_error, execute_in_peers, runcli, set_common_args_func) from gfevents.utils import LockedOpen, get_jwt_token, save_https_cert, NamedTempOpen - from gfevents.eventsapiconf import (WEBHOOKS_FILE_TO_SYNC, WEBHOOKS_FILE, DEFAULT_CONFIG_FILE, @@ -53,7 +51,7 @@ def handle_output_error(err, errcode=1, json_output=False): if json_output: - print (json.dumps({ + print(json.dumps({ "output": "", "error": err })) @@ -92,7 +90,7 @@ def create_webhooks_file_if_not_exists(args): if not os.path.exists(WEBHOOKS_FILE): with NamedTempOpen(WEBHOOKS_FILE, "w") as f: - f.write(json.dumps({})) + f.write(json.dumps({})) def boolify(value): @@ -103,9 +101,9 @@ def boolify(value): def mkdirp(path, 
exit_on_err=False, logger=None): - """ - Try creating required directory structure - ignore EEXIST and raise exception for rest of the errors. + """Try creating required directory structure. + + Ignore EEXIST and raise exception for rest of the errors. Print error in stderr and exit """ try: @@ -210,12 +208,12 @@ def sync_to_peers(args): ret = ERROR_PARTIAL_SUCCESS if args.json: - print (json.dumps({ + print(json.dumps({ "output": json_out, "error": "" })) else: - print (table) + print(table) # If sync status is not ok for any node set error code as partial success sys.exit(ret) @@ -262,12 +260,12 @@ class ReloadCmd(Cmd): def run(self, args): out = action_handle("reload", args.json) if args.json: - print (json.dumps({ + print(json.dumps({ "output": out, "error": "" })) else: - print (out) + print(out) class NodeStatus(Cmd): @@ -289,21 +287,21 @@ def run(self, args): if args.json: json_out["webhooks"] = webhooks.keys() else: - print ("Webhooks: " + ("" if webhooks else "None")) + print("Webhooks: " + ("" if webhooks else "None")) for w in webhooks: - print (w) + print(w) - print () + print() out = action_handle("status", args.json) if args.json: json_out["data"] = out - print (json.dumps({ + print(json.dumps({ "output": json_out, "error": "" })) else: - print (out) + print(out) class WebhookAddCmd(Cmd): @@ -496,12 +494,12 @@ def run(self, args): ret = ERROR_PARTIAL_SUCCESS if args.json: - print (json.dumps({ + print(json.dumps({ "output": json_out, "error": "" })) else: - print (table) + print(table) sys.exit(ret) @@ -529,7 +527,7 @@ def run(self, args): else: json_out[args.name] = data[args.name] - print (json.dumps({ + print(json.dumps({ "output": json_out, "error": "" })) @@ -541,7 +539,7 @@ def run(self, args): else: table.add_row([args.name, data[args.name]]) - print (table) + print(table) def read_file_content_json(fname): @@ -600,7 +598,7 @@ def run(self, args): restart = True if restart: - print ("\nRestart glustereventsd in all nodes") + print("\nRestart 
glustereventsd in all nodes") sync_to_peers(args) @@ -648,7 +646,7 @@ def run(self, args): break if restart: - print ("\nRestart glustereventsd in all nodes") + print("\nRestart glustereventsd in all nodes") sync_to_peers(args) From b26929a58ffff950acf815d0f0b0851397d696eb Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 16:40:56 -0300 Subject: [PATCH 57/72] python linter: clean events/src/glustereventsd.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # events/src/glustereventsd.py | 19 +++++++++++++------ # 1 file changed, 13 insertions(+), 6 deletions(-) # events/src/glustereventsd.py | 19 +++++++++++++------ # 1 file changed, 13 insertions(+), 6 deletions(-) --- events/src/glustereventsd.py | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/events/src/glustereventsd.py b/events/src/glustereventsd.py index bd8b1349cf3..6d0fa8d7795 100644 --- a/events/src/glustereventsd.py +++ b/events/src/glustereventsd.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 # -*- coding: utf-8 -*- +"""Events glustereventsd.""" # # Copyright (c) 2016 Red Hat, Inc. # This file is part of GlusterFS. 
@@ -11,31 +12,37 @@ # from __future__ import print_function -import sys + import signal +import sys import threading + try: import socketserver except ImportError: import SocketServer as socketserver + import socket from argparse import ArgumentParser, RawDescriptionHelpFormatter - from eventtypes import all_events + import handlers import utils from eventsapiconf import SERVER_ADDRESSv4, SERVER_ADDRESSv6, PID_FILE from eventsapiconf import AUTO_BOOL_ATTRIBUTES, AUTO_INT_ATTRIBUTES from utils import logger, PidFile, PidFileLockFailed, boolify + # Subclass so that specifically IPv4 packets are captured class UDPServerv4(socketserver.ThreadingUDPServer): address_family = socket.AF_INET + # Subclass so that specifically IPv6 packets are captured class UDPServerv6(socketserver.ThreadingUDPServer): address_family = socket.AF_INET6 + class GlusterEventsRequestHandler(socketserver.BaseRequestHandler): def handle(self): @@ -114,24 +121,24 @@ def init_event_server(): # Creating the Eventing Server, UDP Server for IPv4 packets try: serverv4 = UDPServerv4((SERVER_ADDRESSv4, port), - GlusterEventsRequestHandler) + GlusterEventsRequestHandler) except socket.error as e: sys.stderr.write("Failed to start Eventsd for IPv4: {0}\n".format(e)) serverv4 = None if serverv4: server_thread1 = threading.Thread(target=UDP_server_thread, - args=(serverv4,)) + args=(serverv4,)) server_thread1.start() # Creating the Eventing Server, UDP Server for IPv6 packets try: serverv6 = UDPServerv6((SERVER_ADDRESSv6, port), - GlusterEventsRequestHandler) + GlusterEventsRequestHandler) except socket.error as e: sys.stderr.write("Failed to start Eventsd for IPv6: {0}\n".format(e)) serverv6 = None if serverv6: server_thread2 = threading.Thread(target=UDP_server_thread, - args=(serverv6,)) + args=(serverv6,)) server_thread2.start() if serverv4 is None and serverv6 is None: sys.stderr.write("Failed to start Eventsd: {0}\n".format(e)) From d9993293ddcfc5ea3e2686fcde460eaa5a8fb029 Mon Sep 17 00:00:00 2001 
From: Thales Antunes de Oliveira Barretto Date: Mon, 3 Mar 2025 22:01:35 -0300 Subject: [PATCH 58/72] python linter: clean events/src/eventsapiconf.py.in This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # events/src/eventsapiconf.py.in | 4 +++- # 1 file changed, 3 insertions(+), 1 deletion(-) # events/src/eventsapiconf.py.in | 4 +++- # 1 file changed, 3 insertions(+), 1 deletion(-) --- events/src/eventsapiconf.py.in | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/events/src/eventsapiconf.py.in b/events/src/eventsapiconf.py.in index 700093bee60..3f7f9a59da0 100644 --- a/events/src/eventsapiconf.py.in +++ b/events/src/eventsapiconf.py.in @@ -12,6 +12,7 @@ import subprocess glusterd_workdir = None + # Methods def get_glusterd_workdir(): global glusterd_workdir @@ -19,7 +20,7 @@ def get_glusterd_workdir(): return glusterd_workdir proc = subprocess.Popen(["gluster", "system::", "getwd"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, - universal_newlines = True) + universal_newlines=True) out, err = proc.communicate() if proc.returncode == 0: glusterd_workdir = out.strip() @@ -27,6 +28,7 @@ def get_glusterd_workdir(): glusterd_workdir = "@GLUSTERD_WORKDIR@" return glusterd_workdir + SERVER_ADDRESS = "0.0.0.0" SERVER_ADDRESSv4 = "0.0.0.0" SERVER_ADDRESSv6 = "::1" From b36b83eb8c5f329aada7577d02e91bb2b686ec3d Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 19:19:42 -0300 Subject: [PATCH 59/72] python linter: clean events/eventskeygen.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. 
The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # events/eventskeygen.py | 41 +++++++++++++++++++++-------------------- # 1 file changed, 21 insertions(+), 20 deletions(-) # events/eventskeygen.py | 41 +++++++++++++++++++++-------------------- # 1 file changed, 21 insertions(+), 20 deletions(-) --- events/eventskeygen.py | 41 +++++++++++++++++++++-------------------- 1 file changed, 21 insertions(+), 20 deletions(-) diff --git a/events/eventskeygen.py b/events/eventskeygen.py index e28ebe9b7e6..ea55add980d 100644 --- a/events/eventskeygen.py +++ b/events/eventskeygen.py @@ -1,5 +1,6 @@ #!/usr/bin/python3 # -*- coding: utf-8 -*- +"""Events eventskeygen.""" # # Copyright (c) 2016 Red Hat, Inc. # This file is part of GlusterFS. @@ -22,7 +23,7 @@ # When adding new keys add it to the END keys = ( # user driven events - #peer and volume management events + # peer and volume management events "EVENT_PEER_ATTACH", "EVENT_PEER_DETACH", "EVENT_VOLUME_CREATE", @@ -35,7 +36,7 @@ "EVENT_BRICK_RESET_COMMIT", "EVENT_BRICK_REPLACE", - #geo-rep events + # geo-rep events "EVENT_GEOREP_CREATE", "EVENT_GEOREP_START", "EVENT_GEOREP_STOP", @@ -45,7 +46,7 @@ "EVENT_GEOREP_CONFIG_SET", "EVENT_GEOREP_CONFIG_RESET", - #bitrot events + # bitrot events "EVENT_BITROT_ENABLE", "EVENT_BITROT_DISABLE", "EVENT_BITROT_SCRUB_THROTTLE", @@ -53,7 +54,7 @@ "EVENT_BITROT_SCRUB_OPTION", "EVENT_BITROT_SCRUB_ONDEMAND", - #quota events + # quota events "EVENT_QUOTA_ENABLE", "EVENT_QUOTA_DISABLE", "EVENT_QUOTA_SET_USAGE_LIMIT", @@ -65,7 +66,7 @@ "EVENT_QUOTA_HARD_TIMEOUT", "EVENT_QUOTA_DEFAULT_SOFT_LIMIT", - #snapshot events + # snapshot events "EVENT_SNAPSHOT_CREATED", "EVENT_SNAPSHOT_CREATE_FAILED", "EVENT_SNAPSHOT_ACTIVATED", @@ -95,8 +96,8 @@ "EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETED", "EVENT_SNAPSHOT_SCHEDULER_SCHEDULE_DELETE_FAILED", - 
#async events - #glusterd events + # async events + # glusterd events "EVENT_SVC_MANAGER_FAILED", "EVENT_SVC_RECONFIGURE_FAILED", "EVENT_SVC_CONNECTED", @@ -123,38 +124,38 @@ "EVENT_IMPORT_BRICK_FAILED", "EVENT_COMPARE_FRIEND_VOLUME_FAILED", "EVENT_NFS_GANESHA_EXPORT_FAILED", - #ec events + # ec events "EVENT_EC_MIN_BRICKS_NOT_UP", "EVENT_EC_MIN_BRICKS_UP", - #georep async events + # georep async events "EVENT_GEOREP_FAULTY", "EVENT_GEOREP_CHECKPOINT_COMPLETED", "EVENT_GEOREP_ACTIVE", "EVENT_GEOREP_PASSIVE", - #quota async events + # quota async events "EVENT_QUOTA_CROSSED_SOFT_LIMIT", - #bitrot async events + # bitrot async events "EVENT_BITROT_BAD_FILE", - #protocol-server events + # protocol-server events "EVENT_CLIENT_CONNECT", "EVENT_CLIENT_AUTH_REJECT", "EVENT_CLIENT_DISCONNECT", - #posix events + # posix events "EVENT_POSIX_SAME_GFID", "EVENT_POSIX_ALREADY_PART_OF_VOLUME", "EVENT_POSIX_BRICK_NOT_IN_VOLUME", "EVENT_POSIX_BRICK_VERIFICATION_FAILED", "EVENT_POSIX_ACL_NOT_SUPPORTED", "EVENT_POSIX_HEALTH_CHECK_FAILED", - #afr events + # afr events "EVENT_AFR_QUORUM_MET", "EVENT_AFR_QUORUM_FAIL", "EVENT_AFR_SUBVOL_UP", "EVENT_AFR_SUBVOLS_DOWN", "EVENT_AFR_SPLIT_BRAIN", - #tier events + # tier events "EVENT_TIER_ATTACH", "EVENT_TIER_ATTACH_FORCE", "EVENT_TIER_DETACH_START", @@ -168,8 +169,8 @@ "EVENT_TIER_WATERMARK_RAISED_TO_MID", "EVENT_TIER_WATERMARK_DROPPED_TO_LOW", - #dht events - #add/remove brick events + # dht events + # add/remove brick events "EVENT_VOLUME_ADD_BRICK", "EVENT_VOLUME_ADD_BRICK_FAILED", "EVENT_VOLUME_REMOVE_BRICK_START", @@ -182,17 +183,17 @@ "EVENT_VOLUME_REMOVE_BRICK_FORCE_FAILED", "EVENT_VOLUME_REMOVE_BRICK_FAILED", - #rebalance events + # rebalance events "EVENT_VOLUME_REBALANCE_START", "EVENT_VOLUME_REBALANCE_STOP", "EVENT_VOLUME_REBALANCE_FAILED", "EVENT_VOLUME_REBALANCE_COMPLETE", - #tier events + # tier events "EVENT_TIER_START", "EVENT_TIER_START_FORCE", - #brick/inodes events + # brick/inodes events "EVENT_DHT_DISK_USAGE", 
"EVENT_DHT_INODES_USAGE", ) From 911ef39f9220f11c1c95b3a42612e6c7e1887589 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 26 Feb 2025 11:37:29 -0300 Subject: [PATCH 60/72] python linter: clean api/examples/getvolfile.py This series aim to reduce the python linter build logs. This commit silences most of the python linter complaints without any functional changes. The changes include mostly formatting and missing obvious docstrings. We left out dependency and exception handling complaints on purpose to be addressed separately. Signed-off-by: Thales Antunes de Oliveira Barretto # api/examples/getvolfile.py | 6 +++++- # 1 file changed, 5 insertions(+), 1 deletion(-) # api/examples/getvolfile.py | 6 +++++- # 1 file changed, 5 insertions(+), 1 deletion(-) --- api/examples/getvolfile.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/api/examples/getvolfile.py b/api/examples/getvolfile.py index 3b2c8ab5a15..7f35ae5572e 100755 --- a/api/examples/getvolfile.py +++ b/api/examples/getvolfile.py @@ -1,6 +1,8 @@ #!/usr/bin/python3 +"""gfapi example for glfs_get_volfile().""" from __future__ import print_function + import ctypes import ctypes.util @@ -12,6 +14,7 @@ def get_volfile(host, volume): + """Exercises glfs_get_volfile.""" # This is set to a large value to exercise the "buffer not big enough" # path. More realistically, you'd just start with a huge buffer. 
BUF_LEN = 0 @@ -30,6 +33,7 @@ def get_volfile(host, volume): return vlen return vbuf.value[:vlen] + if __name__ == "__main__": import sys @@ -42,4 +46,4 @@ def get_volfile(host, volume): for line in res.split('\n'): print(line) except: - print("bad return value %s" % res) + print(f"bad return value {res}") From 3f6428a6629be82bc2d541164f664ec4f4257049 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:28:47 -0300 Subject: [PATCH 61/72] python linter: raw string extras/create_new_xlator/generate_xlator.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/create_new_xlator/generate_xlator.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # extras/create_new_xlator/generate_xlator.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- extras/create_new_xlator/generate_xlator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/create_new_xlator/generate_xlator.py b/extras/create_new_xlator/generate_xlator.py index 2e6e9438635..98f18370dff 100755 --- a/extras/create_new_xlator/generate_xlator.py +++ b/extras/create_new_xlator/generate_xlator.py @@ -170,7 +170,7 @@ def get_copyright(): def load_fragments(): - pragma_re = re.compile('pragma fragment (.*)') + pragma_re = re.compile(r'pragma fragment (.*)') cur_symbol = None cur_value = "" result = {} From 2affd7ae693ee471eb4556e8790ccb83ef4bdd52 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:28:53 -0300 Subject: [PATCH 62/72] python linter: raw string extras/failed-tests.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/failed-tests.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # extras/failed-tests.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- extras/failed-tests.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/failed-tests.py b/extras/failed-tests.py index f7f110246b5..d5fa717365a 100755 --- a/extras/failed-tests.py +++ b/extras/failed-tests.py @@ -28,7 +28,7 @@ def process_failure(url, node): if VERBOSE: print(t2.encode('utf-8')) if t2.find("Wstat") != -1: - test_case = re.search('\./tests/.*\.t', t2) + test_case = re.search(r'\./tests/.*\.t', t2) if test_case: summary[test_case.group()].append((url, node)) accum = [] From 91ffbd5dabc0515bd117e2ebe8df2b5d1c8f82a4 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:28:56 -0300 Subject: [PATCH 63/72] python linter: raw string extras/git-branch-diff.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/git-branch-diff.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) # extras/git-branch-diff.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) --- extras/git-branch-diff.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extras/git-branch-diff.py b/extras/git-branch-diff.py index 4e6d821a76f..4b04229b013 100755 --- a/extras/git-branch-diff.py +++ b/extras/git-branch-diff.py @@ -140,12 +140,12 @@ def check_author_exist(self): for ide in ide_list: cmd4 = 'git log ' + self.s_pattern + ' --author=' + ide c_list = subprocess.check_output(cmd4, shell=True) - if len(c_list) is 0: + if len(c_list) == 0: print("Error: --author=%s doesn't exit" % self.g_author) print("see '%s --help'" % __file__) exit(1) if len(ide_list) > 1: - self.g_author = "\|".join(ide_list) + self.g_author = r"\|".join(ide_list) def connected_to_gerrit(self): "Check if gerrit server is reachable." From 997d89ff1c91d1b137cd9645330691d955951435 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:28:59 -0300 Subject: [PATCH 64/72] python linter: raw string extras/quota/quota_fsck.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/quota_fsck.py | 10 +++++----- # 1 file changed, 5 insertions(+), 5 deletions(-) # extras/quota/quota_fsck.py | 10 +++++----- # 1 file changed, 5 insertions(+), 5 deletions(-) --- extras/quota/quota_fsck.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py index 624ade99797..3d71d83e3d5 100755 --- a/extras/quota/quota_fsck.py +++ b/extras/quota/quota_fsck.py @@ -171,12 +171,12 @@ def get_quota_xattr_brick(dpath): if xattr_key == "": # skip any empty lines continue - elif not re.search("quota", xattr_key): + elif not re.search(r"quota", xattr_key): # skip all non quota xattr. continue xattr_value = xattr.split("=")[1] - if re.search("contri", xattr_key): + if re.search(r"contri", xattr_key): xattr_version = xattr_key.split(".")[5] if 'version' not in xattr_dict: @@ -207,14 +207,14 @@ def get_quota_xattr_brick(dpath): xattr_dict['size'] = int(xattr_value[2:18], 16) xattr_dict['file_count'] = int(xattr_value[18:34], 16) xattr_dict['dir_count'] = int(xattr_value[34:], 16) - elif re.search("dirty", xattr_key): + elif re.search(r"dirty", xattr_key): if xattr_value == IS_CLEAN: xattr_dict['dirty'] = False elif xattr_value == IS_DIRTY: xattr_dict['dirty'] = True - elif re.search("limit_objects", xattr_key): + elif re.search(r"limit_objects", xattr_key): xattr_dict['limit_objects'] = int(xattr_value[2:18], 16) - elif re.search("limit_set", xattr_key): + elif re.search(r"limit_set", xattr_key): xattr_dict['limit_set'] = int(xattr_value[2:18], 16) return xattr_dict From 30ee9627c424129d6e97b653b3c839d04dcc1d8a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:29:02 -0300 Subject: [PATCH 65/72] python linter: raw string extras/quota/xattr_analysis.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/xattr_analysis.py | 12 ++++++------ # 1 file changed, 6 insertions(+), 6 deletions(-) # extras/quota/xattr_analysis.py | 12 ++++++------ # 1 file changed, 6 insertions(+), 6 deletions(-) --- extras/quota/xattr_analysis.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py index 4fa90a349a7..5f5b4aa147f 100755 --- a/extras/quota/xattr_analysis.py +++ b/extras/quota/xattr_analysis.py @@ -31,7 +31,7 @@ def get_quota_xattr_brick(): mismatch_size = [('====contri_size===', '====size====')] for xattr in pairs: k = xattr.split("=")[0] - if re.search("# file:", k): + if re.search(r"# file:", k): print(xdict) filename = k print("=====" + filename + "=======") @@ -41,7 +41,7 @@ def get_quota_xattr_brick(): else: print(xattr) v = xattr.split("=")[1] - if re.search("contri", k): + if re.search(r"contri", k): if len(v) == 34: # for files size is obtained in iatt, file count # should be 1, dir count=0 @@ -51,18 +51,18 @@ def get_quota_xattr_brick(): xdict['contri_size'] = size(int(v[2:18], 16)) xdict['contri_file_count'] = int(v[18:34], 16) xdict['contri_dir_count'] = int(v[34:], 16) - elif re.search("size", k): + elif re.search(r"size", k): xdict['size'] = size(int(v[2:18], 16)) xdict['file_count'] = int(v[18:34], 16) xdict['dir_count'] = int(v[34:], 16) - elif re.search("dirty", k): + elif re.search(r"dirty", k): if v == '0x3000': xdict['dirty'] = False elif v == '0x3100': xdict['dirty'] = True - elif re.search("limit_objects", k): + elif re.search(r"limit_objects", k): xdict['limit_objects'] = int(v[2:18], 16) - elif re.search("limit_set", k): + elif re.search(r"limit_set", k): xdict['limit_set'] = size(int(v[2:18], 16)) if 'size' in xdict and 'contri_size' in xdict and xdict['size'] != xdict['contri_size']: From c0a76963d26a77621fbf0581df7ecb22b49573d9 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto 
Date: Tue, 4 Mar 2025 11:29:07 -0300 Subject: [PATCH 66/72] python linter: raw string geo-replication/syncdaemon/resource.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. Signed-off-by: Thales Antunes de Oliveira Barretto # geo-replication/syncdaemon/resource.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # geo-replication/syncdaemon/resource.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- geo-replication/syncdaemon/resource.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/geo-replication/syncdaemon/resource.py b/geo-replication/syncdaemon/resource.py index af5d7f99b23..b3774d1aeb8 100644 --- a/geo-replication/syncdaemon/resource.py +++ b/geo-replication/syncdaemon/resource.py @@ -808,7 +808,7 @@ def keep_alive(cls, dct): val = struct.pack(cls.FRGN_FMTSTR, *(dct['version'] + tuple(int(x, 16) - for x in re.findall('(?:[\da-f]){2}', + for x in re.findall(r'(?:[\da-f]){2}', dct['uuid'])) + (dct['retval'],) + dct['volume_mark'][0:2] + ( dct['timeout'],))) From a7e546e824d8340168400ac01f46978fb653535a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:29:09 -0300 Subject: [PATCH 67/72] python linter: raw string tests/utils/create-files.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/create-files.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # tests/utils/create-files.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- tests/utils/create-files.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/create-files.py b/tests/utils/create-files.py index ab2f4a7b2ed..7d56ab07d76 100755 --- a/tests/utils/create-files.py +++ b/tests/utils/create-files.py @@ -345,7 +345,7 @@ def human2bytes(size): 1024*1024: ['M', 'MB', 'MiB'], 1024*1024*1024: ['G', 'GB', 'GiB'] } - num = re.search('(\d+)', size).group() + num = re.search(r'(\d+)', size).group() ext = size[len(num):] num = int(num) if ext == '': From fda8035214c7688601a419fd3eaca346cb9cbdce Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 11:29:11 -0300 Subject: [PATCH 68/72] python linter: raw string tools/gfind_missing_files/gfid_to_path.py This series aim to reduce the python linter build logs. This commit changes to "raw" strings for regex operations. 
Signed-off-by: Thales Antunes de Oliveira Barretto # tools/gfind_missing_files/gfid_to_path.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) # tools/gfind_missing_files/gfid_to_path.py | 4 ++-- # 1 file changed, 2 insertions(+), 2 deletions(-) --- tools/gfind_missing_files/gfid_to_path.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/gfind_missing_files/gfid_to_path.py b/tools/gfind_missing_files/gfid_to_path.py index 01e08a9494a..c00168add1b 100644 --- a/tools/gfind_missing_files/gfid_to_path.py +++ b/tools/gfind_missing_files/gfid_to_path.py @@ -54,8 +54,8 @@ def find_path_from_changelog(fd, gfid): """ content = fd.read() - pattern = "E%s" % gfid - pattern += "\x00(3|23)\x00\d+\x00\d+\x00\d+\x00([^\x00]+)/([^\x00]+)" + pattern = r"E%s" % gfid + pattern += r"\x00(3|23)\x00\d+\x00\d+\x00\d+\x00([^\x00]+)/([^\x00]+)" pat = re.compile(pattern) match = pat.search(content) From c4cb2043ddec60ed06ce532081be085ea21a6902 Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Tue, 4 Mar 2025 15:09:44 -0300 Subject: [PATCH 69/72] python linter: string comment extras/git-branch-diff.py This series aim to reduce the python linter build logs. This commit changes unassigned strings to comments. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/git-branch-diff.py | 148 +++++++++++++++++++++++----------------------- # 1 file changed, 74 insertions(+), 74 deletions(-) # extras/git-branch-diff.py | 148 +++++++++++++++++++++++----------------------- # 1 file changed, 74 insertions(+), 74 deletions(-) --- extras/git-branch-diff.py | 148 +++++++++++++++++++------------------- 1 file changed, 74 insertions(+), 74 deletions(-) diff --git a/extras/git-branch-diff.py b/extras/git-branch-diff.py index 4b04229b013..5df9b2e6587 100755 --- a/extras/git-branch-diff.py +++ b/extras/git-branch-diff.py @@ -1,79 +1,79 @@ !/bin/python2 -""" - Copyright (c) 2016 Red Hat, Inc. - This file is part of GlusterFS. 
- - This file is licensed to you under your choice of the GNU Lesser - General Public License, version 3 or any later version (LGPLv3 or - later), or the GNU General Public License, version 2 (GPLv2), in all - cases as published by the Free Software Foundation. -""" - -""" - ABOUT: - This script helps in visualizing backported and missed commits between two - different branches, tags or commit ranges. In the list of missed commits, - it will help you identify patches which are posted for reviews on gerrit server. - - USAGE: - $ ./extras/git-branch-diff.py --help - usage: git-branch-diff.py [-h] [-s SOURCE] -t TARGET [-a AUTHOR] [-p PATH] - [-o OPTIONS] - - git wrapper to diff local or remote branches/tags/commit-ranges - - optional arguments: - -h, --help show this help message and exit - -s SOURCE, --source SOURCE - source pattern, it could be a branch, tag or a commit - range - -t TARGET, --target TARGET - target pattern, it could be a branch, tag or a commit - range - -a AUTHOR, --author AUTHOR - default: git config name/email, to provide multiple - specify comma separated values - -p PATH, --path PATH show source and target diff w.r.t given path, to - provide multiple specify space in between them - -o OPTIONS, --options OPTIONS - add other git options such as --after=<>, --before=<> - etc. 
experts use; - - SAMPLE EXECUTIONS: - $ ./extras/git-branch-diff.py -t origin/release-3.8 - - $ ./extras/git-branch-diff.py -s local_branch -t origin/release-3.7 - - $ ./extras/git-branch-diff.py -s 4517bf8..e66add8 -t origin/release-3.7 - $ ./extras/git-branch-diff.py -s HEAD..c4efd39 -t origin/release-3.7 - - $ ./extras/git-branch-diff.py -t v3.7.11 --author="author@redhat.com" - $ ./extras/git-branch-diff.py -t v3.7.11 --author="authorX, authorY, authorZ" - - $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="xlators/" - $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="./xlators ./rpc" - - $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="*" - $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="All" - $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="Null" - - $ ./extras/git-branch-diff.py -t v3.7.11 --options="--after=2015-03-01 \ - --before=2016-01-30" - - DECLARATION: - While backporting commit to another branch only subject of the patch may - remain unchanged, all others such as commit message, commit Id, change Id, - bug Id, may be changed. This script works by taking commit subject as the - key value for comparing two git branches, which can be local or remote. - - Note: This script may ignore commits which have altered their commit subjects - while backporting patches. Also this script doesn't have any intelligence to - detect squashed commits. - - AUTHOR: - Prasanna Kumar Kalever -""" +# +# Copyright (c) 2016 Red Hat, Inc. +# This file is part of GlusterFS. + +# This file is licensed to you under your choice of the GNU Lesser +# General Public License, version 3 or any later version (LGPLv3 or +# later), or the GNU General Public License, version 2 (GPLv2), in all +# cases as published by the Free Software Foundation. +# + +# +# ABOUT: +# This script helps in visualizing backported and missed commits between two +# different branches, tags or commit ranges. 
In the list of missed commits, +# it will help you identify patches which are posted for reviews on gerrit server. + +# USAGE: +# $ ./extras/git-branch-diff.py --help +# usage: git-branch-diff.py [-h] [-s SOURCE] -t TARGET [-a AUTHOR] [-p PATH] +# [-o OPTIONS] + +# git wrapper to diff local or remote branches/tags/commit-ranges + +# optional arguments: +# -h, --help show this help message and exit +# -s SOURCE, --source SOURCE +# source pattern, it could be a branch, tag or a commit +# range +# -t TARGET, --target TARGET +# target pattern, it could be a branch, tag or a commit +# range +# -a AUTHOR, --author AUTHOR +# default: git config name/email, to provide multiple +# specify comma separated values +# -p PATH, --path PATH show source and target diff w.r.t given path, to +# provide multiple specify space in between them +# -o OPTIONS, --options OPTIONS +# add other git options such as --after=<>, --before=<> +# etc. experts use; + +# SAMPLE EXECUTIONS: +# $ ./extras/git-branch-diff.py -t origin/release-3.8 + +# $ ./extras/git-branch-diff.py -s local_branch -t origin/release-3.7 + +# $ ./extras/git-branch-diff.py -s 4517bf8..e66add8 -t origin/release-3.7 +# $ ./extras/git-branch-diff.py -s HEAD..c4efd39 -t origin/release-3.7 + +# $ ./extras/git-branch-diff.py -t v3.7.11 --author="author@redhat.com" +# $ ./extras/git-branch-diff.py -t v3.7.11 --author="authorX, authorY, authorZ" + +# $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="xlators/" +# $ ./extras/git-branch-diff.py -t origin/release-3.8 --path="./xlators ./rpc" + +# $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="*" +# $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="All" +# $ ./extras/git-branch-diff.py -t origin/release-3.6 --author="Null" + +# $ ./extras/git-branch-diff.py -t v3.7.11 --options="--after=2015-03-01 \ +# --before=2016-01-30" + +# DECLARATION: +# While backporting commit to another branch only subject of the patch may +# remain unchanged, all others 
such as commit message, commit Id, change Id, +# bug Id, may be changed. This script works by taking commit subject as the +# key value for comparing two git branches, which can be local or remote. + +# Note: This script may ignore commits which have altered their commit subjects +# while backporting patches. Also this script doesn't have any intelligence to +# detect squashed commits. + +# AUTHOR: +# Prasanna Kumar Kalever +# from __future__ import print_function From 74108ee4fa4b40b1d67fdcc1ca703af3f3e832cd Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 5 Mar 2025 10:48:09 -0300 Subject: [PATCH 70/72] python linter: syntax "not in" tests/utils/gfid-access.py This series aim to reduce the python linter build logs. This commit changes a syntax error for checking when checking for membership with "for not x in group" to "for x not in group". Signed-off-by: Thales Antunes de Oliveira Barretto # tests/utils/gfid-access.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # tests/utils/gfid-access.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- tests/utils/gfid-access.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/utils/gfid-access.py b/tests/utils/gfid-access.py index d28b0050439..cc26a0c4c05 100755 --- a/tests/utils/gfid-access.py +++ b/tests/utils/gfid-access.py @@ -134,7 +134,7 @@ def entry_pack_symlink(gf, bn, lnk, mo, uid, gid): Xattr.lsetxattr(pargfid, 'glusterfs.gfid.newfile', blob) except OSError: ex = sys.exc_info()[1] - if not ex.errno in [EEXIST]: + if ex.errno not in [EEXIST]: raise sys.exit(-1) print("File creation OK") From cdbed3b3021856d210cd514ca97996a7beae7bea Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 5 Mar 2025 10:54:41 -0300 Subject: [PATCH 71/72] python linter: literal comparison extras/quota/xattr_analysis.py This series aim to reduce the python linter build logs. Changes a comparison to literal value done with "is" to "==". 
Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/xattr_analysis.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) # extras/quota/xattr_analysis.py | 2 +- # 1 file changed, 1 insertion(+), 1 deletion(-) --- extras/quota/xattr_analysis.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extras/quota/xattr_analysis.py b/extras/quota/xattr_analysis.py index 5f5b4aa147f..ba47ab22fbc 100755 --- a/extras/quota/xattr_analysis.py +++ b/extras/quota/xattr_analysis.py @@ -36,7 +36,7 @@ def get_quota_xattr_brick(): filename = k print("=====" + filename + "=======") xdict = {} - elif k is "": + elif k == "": pass else: print(xattr) From 9b3b2e0e121730fab2cfa5d955f0c53b54ef5b5a Mon Sep 17 00:00:00 2001 From: Thales Antunes de Oliveira Barretto Date: Wed, 5 Mar 2025 17:27:25 -0300 Subject: [PATCH 72/72] python linter: string comment extras/quota/quota_fsck.py This series aim to reduce the python linter build logs. This commit changes unassigned strings to comments. Signed-off-by: Thales Antunes de Oliveira Barretto # extras/quota/quota_fsck.py | 106 ++++++++++++++++++++++----------------------- # 1 file changed, 51 insertions(+), 55 deletions(-) # extras/quota/quota_fsck.py | 106 ++++++++++++++++++++++----------------------- # 1 file changed, 51 insertions(+), 55 deletions(-) --- extras/quota/quota_fsck.py | 106 ++++++++++++++++++------------------- 1 file changed, 51 insertions(+), 55 deletions(-) diff --git a/extras/quota/quota_fsck.py b/extras/quota/quota_fsck.py index 3d71d83e3d5..304b8f4a0e4 100755 --- a/extras/quota/quota_fsck.py +++ b/extras/quota/quota_fsck.py @@ -1,17 +1,19 @@ #!/usr/bin/python3 -# The following script enables, Detecting, Reporting and Fixing -# anomalies in quota accounting. Run this script with -h option -# for further details. +"""Extras quota/quota-fsck. +The following script enables, Detecting, Reporting and Fixing +anomalies in quota accounting. Run this script with -h option +for further details. 
""" - Copyright (c) 2018 Red Hat, Inc. - This file is part of GlusterFS. - This file is licensed to you under your choice of the GNU Lesser - General Public License, version 3 or any later version (LGPLv3 or - later), or the GNU General Public License, version 2 (GPLv2), in all - cases as published by the Free Software Foundation. -""" +# Copyright (c) 2018 Red Hat, Inc. +# This file is part of GlusterFS. +# +# This file is licensed to you under your choice of the GNU Lesser +# General Public License, version 3 or any later version (LGPLv3 or +# later), or the GNU General Public License, version 2 (GPLv2), in all +# cases as published by the Free Software Foundation. + from __future__ import print_function import argparse @@ -86,34 +88,34 @@ def size_differs_lot(s1, s2): def fix_hardlink_accounting(curr_dict, accounted_dict, curr_size): + """Fix hardlink accounting. + + Hard links are messy.. we have to account them for their parent + directory. But, stop accounting at the most common ancestor. + Eg: + say we have 3 hardlinks : /d1/d2/h1, /d1/d3/h2 and /d1/h3 + + suppose we encounter the hard links h1 first , then h2 and then h3. + while accounting for h1, we account the size until root(d2->d1->/) + while accounting for h2, we need to account only till d3. (as d1 + and / are accounted for this inode). + while accounting for h3 we should not account at all.. as all + its ancestors are already accounted for same inode. + + curr_dict : dict of hardlinks that were seen and + accounted by the current iteration. + accounted_dict : dict of hardlinks that has already been + accounted for. + + size : size of the object as accounted by the + curr_iteration. + + Return vale: + curr_size : size reduced by hardlink sizes for those + hardlinks that has already been accounted + in current subtree. + Also delete the duplicate link from curr_dict. """ - Hard links are messy.. we have to account them for their parent - directory. But, stop accounting at the most common ancestor. 
- Eg: - say we have 3 hardlinks : /d1/d2/h1, /d1/d3/h2 and /d1/h3 - - suppose we encounter the hard links h1 first , then h2 and then h3. - while accounting for h1, we account the size until root(d2->d1->/) - while accounting for h2, we need to account only till d3. (as d1 - and / are accounted for this inode). - while accounting for h3 we should not account at all.. as all - its ancestors are already accounted for same inode. - - curr_dict : dict of hardlinks that were seen and - accounted by the current iteration. - accounted_dict : dict of hardlinks that has already been - accounted for. - - size : size of the object as accounted by the - curr_iteration. - - Return vale: - curr_size : size reduced by hardlink sizes for those - hardlinks that has already been accounted - in current subtree. - Also delete the duplicate link from curr_dict. - """ - dual_accounted_links = set(curr_dict.keys()) & set(accounted_dict.keys()) for link in dual_accounted_links: curr_size = curr_size - curr_dict[link] @@ -144,24 +146,18 @@ def get_quota_xattr_brick(dpath): "-d", "-m.", "-e", "hex", dpath]) pairs = out.splitlines() - """ - Sample output to be parsed: - [root@dhcp35-100 mnt]# getfattr -d -m. -e hex /export/b1/B0/d14/d13/ - # file: export/b1/B0/d14/d13/ - security.selinux=0x756e636f6e66696e65645f753a6f626a6563745f723a7573725f743a733000 - trusted.gfid=0xbae5e0d2d05043de9fd851d91ecf63e8 - trusted.glusterfs.dht=0x000000010000000000000000ffffffff - trusted.glusterfs.dht.mds=0x00000000 - trusted.glusterfs.quota.6a7675a3-b85a-40c5-830b-de9229d702ce.contri.39=0x00000000000000000000000000000000000000000000000e - trusted.glusterfs.quota.dirty=0x3000 - trusted.glusterfs.quota.size.39=0x00000000000000000000000000000000000000000000000e - """ - - """ - xattr_dict dictionary holds quota related xattrs - eg: - """ - + # Sample output to be parsed: + # [root@dhcp35-100 mnt]# getfattr -d -m. 
-e hex /export/b1/B0/d14/d13/ + # # file: export/b1/B0/d14/d13/ + # security.selinux=0x756e636f6e66696e65645f753a6f626a6563745f723a7573725f743a733000 + # trusted.gfid=0xbae5e0d2d05043de9fd851d91ecf63e8 + # trusted.glusterfs.dht=0x000000010000000000000000ffffffff + # trusted.glusterfs.dht.mds=0x00000000 + # trusted.glusterfs.quota.6a7675a3-b85a-40c5-830b-de9229d702ce.contri.39=0x00000000000000000000000000000000000000000000000e + # trusted.glusterfs.quota.dirty=0x3000 + # trusted.glusterfs.quota.size.39=0x00000000000000000000000000000000000000000000000e + + # xattr_dict dictionary holds quota related xattrs xattr_dict = {} xattr_dict['parents'] = {}