#!/usr/bin/env python3

# pylint: disable=too-many-return-statements
# pylint: disable=global-variable-not-assigned
# pylint: disable=global-statement
# pylint: disable=too-many-lines
# pylint: disable=anomalous-backslash-in-string
# pylint: disable=protected-access

import copy
import enum
import glob

# Not requests, to avoid requiring an extra dependency.
import http.client
import io
import itertools
import json
import math
import multiprocessing
import multiprocessing.managers
import multiprocessing.sharedctypes
import multiprocessing.synchronize
import os
import os.path
import platform
import random
import re
import shlex
import shutil
import signal
import socket
import string
import subprocess
import sys
import traceback
import urllib.parse

# for crc32
import zlib
from argparse import ArgumentParser, Namespace
from ast import literal_eval as make_tuple
from contextlib import contextmanager, redirect_stdout
from datetime import datetime, timedelta
from errno import ESRCH
from ssl import SSLEOFError
from subprocess import PIPE, Popen
from time import sleep, time
from typing import Dict, List, Optional, Set, Tuple, Union

try:
    import termcolor  # type: ignore
except ImportError:
    termcolor = None


USE_JINJA = True
try:
    import jinja2
except ImportError:
    USE_JINJA = False
    print("WARNING: jinja2 not installed! Template tests will be skipped.")

MESSAGES_TO_RETRY = [
    "ConnectionPoolWithFailover: Connection failed at try",
    "DB::Exception: New table appeared in database being dropped or detached. Try again",
    "is already started to be removing by another replica right now",
    # This is from LSan, and it indicates its own internal problem:
    "Unable to get registers from thread",
    # Next two can be caused by a flaky tmp FS; normally fine upon retry; details at #93451
    "No such file or directory",
    "Permission denied",
    "Net Exception: Connection reset by peer",
    # Transient Keeper errors that happen under heavy load (e.g. ARM ASan builds)
    "Coordination::Exception: Connection loss",
    "Coordination::Exception: Session expired",
]

IGNORED_SANITIZER_ERRORS = [
    "ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!",
    # Note: usually such an error means the sanitizer found something but failed to print it, so it should not be ignored:
    # "LLVM ERROR: IO failure on output stream: Broken pipe",
]

MAX_RETRIES = 3

TEST_FILE_EXTENSIONS = [".sql", ".sql.j2", ".sh", ".py", ".expect"]

VERSION_PATTERN = r"^((\d+\.)?(\d+\.)?(\d+\.)?\d+)$"

TEST_MAX_RUN_TIME_IN_SECONDS = 180


def detect_cgroup_version():
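    """Return 2 for cgroup v2 (unified hierarchy exposing cgroup.controllers),
    1 for cgroup v1 (per-controller dirs like /sys/fs/cgroup/memory), else None."""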
    if os.path.exists("/sys/fs/cgroup/cgroup.controllers"):
        return 2
    elif os.path.exists("/sys/fs/cgroup/memory"):
        return 1
    return None


CGROUP_VERSION = detect_cgroup_version()


class SharedEngineReplacer:
    SPECIALIZED_ENGINES = "Collapsing|VersionedCollapsing|Summing|Replacing|Aggregating|Coalescing"
    ENGINES_NON_REPLICATED_REGEXP = (
        rf"(?i)ENGINE[ =]{{0,5}}(({SPECIALIZED_ENGINES}|)MergeTree\(?\)?)"
    )
    ENGINES_MAPPING_REPLICATED = [
        ("ReplicatedMergeTree", "SharedMergeTree"),
        ("ReplicatedCollapsingMergeTree", "SharedCollapsingMergeTree"),
        (
            "ReplicatedVersionedCollapsingMergeTree",
            "SharedVersionedCollapsingMergeTree",
        ),
        ("ReplicatedSummingMergeTree", "SharedSummingMergeTree"),
        ("ReplicatedReplacingMergeTree", "SharedReplacingMergeTree"),
        ("ReplicatedAggregatingMergeTree", "SharedAggregatingMergeTree"),
        ("ReplicatedCoalescingMergeTree", "SharedCoalescingMergeTree"),
    ]
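    # Matches the new-style syntax with explicit arguments, e.g.
    # ReplicatedMergeTree('/clickhouse/tables/{database}/name', '1')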
    NEW_SYNTAX_REPLICATED_MERGE_TREE_RE = (
        r"Replicated[a-zA-Z]*MergeTree\((\\?'.*\\?')?,?(\\?'.*\\?')?[a-zA-Z, _}{]*\)"
    )
    SHARED_MERGE_TREE_RE = (
        r"Shared[a-zA-Z]*MergeTree\((\\?'.*\\?')?,?(\\?'.*\\?')?[a-zA-Z, _}{]*\)"
    )

    OLD_SYNTAX_OR_ARGUMENTS_RE = r"Tree\(.*[0-9]+.*\)"

    CLUSTER_KEYWORDS = [
        "test_shard_localhost",
        "test_shard_localhost_secure",
        "test_cluster_two_shards_localhost",
        "test_cluster_two_shards",
        "test_cluster_1_shard_3_replicas_1_unavailable",
        "test_cluster_database_replicated",
        "test_cluster_one_shard_two_replicas",
        "test_cluster_one_shard_three_replicas_localhost",
        "test_cluster_two_shards_different_databases",
        "test_cluster_two_replicas_different_databases_internal_replication",
        "test_cluster_two_shards_different_databases_with_local",
        "test_cluster_two_shard_three_replicas_localhost",
    ]

    DISKS_MAP = {
        "s3_disk": "s3disk",
        "s3_plain_disk": "s3diskForSampleData",
        "s3_cache": "s3WithKeeperDiskWithCache",
        "local_disk": "default",
        "s3_no_cache": "s3WithKeeperDisk",
    }

    REGEX_REPLACE = {
        r"cluster_for_parallel_replicas\s?=\s?'parallel_replicas'": "cluster_for_parallel_replicas='default'",
        r"--cluster_for_parallel_replicas \"parallel_replicas\"": '--cluster_for_parallel_replicas "default"',
        r"(SET|set) log_queries = \d;": "",
        r"cluster\(parallel_replicas": r"cluster(default",
    }

    def _check_replicated_new_syntax(self, line):
        return re.search(self.NEW_SYNTAX_REPLICATED_MERGE_TREE_RE, line) is not None

    def _check_old_syntax_or_arguments(self, line):
        return re.search(self.OLD_SYNTAX_OR_ARGUMENTS_RE, line) is not None

    @classmethod
    def has_non_replicated(cls, filename):
        with open(filename, "r", newline="", encoding="utf-8", errors="ignore") as f:
            for line in f:
                if not ("cloud_mode" in line) and re.search(
                    cls.ENGINES_NON_REPLICATED_REGEXP, line
                ):
                    return True
        return False

    @classmethod
    def has_replicated_or_shared_or_cloud(cls, filename):
        with open(filename, "r", newline="", encoding="utf-8", errors="ignore") as f:
            for line in f:
                if (
                    "cloud_mode" in line
                    or re.search(cls.NEW_SYNTAX_REPLICATED_MERGE_TREE_RE, line)
                    is not None
                    or re.search(cls.SHARED_MERGE_TREE_RE, line) is not None
                ):
                    return True
        return False

    @staticmethod
    def _is_select_line(line):
        return re.match(r"^(SELECT).*", line, flags=re.IGNORECASE)

    @staticmethod
    def _is_create_query(line):
        return re.match(r"^(CREATE|ENGINE).*", line, flags=re.IGNORECASE)

    def _replace_non_replicated(
        self, line, escape_quotes, use_random_path, add_path=True, replace_with="Shared"
    ):
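        # E.g. in 01234_foo.sql, "ENGINE = MergeTree() ORDER BY x" becomes
        # "ENGINE = SharedMergeTree('/01234/foo/<pid>', '1') ORDER BY x"
        # (with an extra random path component when use_random_path is set).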
        groups = re.search(self.ENGINES_NON_REPLICATED_REGEXP, line)
        if groups is not None and not self._check_old_syntax_or_arguments(line):
            non_replicated_engine = groups.groups()[0]
            basename_no_ext = os.path.splitext(os.path.basename(self.file_name))[0]
            if use_random_path:
                shared_path = "/" + os.path.join(
                    basename_no_ext.replace("_", "/"),
                    str(os.getpid()),
                    str(random.randint(1, 1000)),
                )
            else:
                shared_path = "/" + os.path.join(
                    basename_no_ext.replace("_", "/"), str(os.getpid())
                )

            shared_engine = replace_with + non_replicated_engine.replace("()", "")

            if escape_quotes:
                # in cloud runs we remove SMT parameters to use the default ZK path and replica
                engine_path = f"(\\'{shared_path}\\', \\'1\\')"
            else:
                engine_path = f"('{shared_path}', '1')"

            if add_path:
                shared_engine += engine_path

            return line.replace(non_replicated_engine, shared_engine)

        return line

    def _need_to_replace_something(self):
        return (
            (
                (self.replace_replicated or self.replace_non_replicated)
                and "shared_merge_tree" not in self.file_name
            )
            or self.cloud
            or self.args.replace_log_memory_with_mergetree
        )

    def _has_show_create_table(self):
        with open(self.file_name, "r", encoding="utf-8") as f:
            return re.search("show create table", f.read(), re.IGNORECASE)

    def _remove_rmt_parameters(self, line):
        # Regexp for removing zookeeper path and replica name:
        # SharedMergeTree('/clickhouse/tables/{database}/t_lightweight_mut_6', '1')
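        # becomes SharedMergeTree(); any third (trailing) engine argument is kept.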
        smt_with_parameters = rf"((Shared|Replicated)(?:{self.SPECIALIZED_ENGINES})?MergeTree)\('.*?',\s*'.*?'(?:,\s*(.*?))?\)"
        if re.search(smt_with_parameters, line):
            modified_statement = re.sub(
                smt_with_parameters,
                lambda m: f"{m.group(1)}({m.group(3) or ''})",
                line,
            )
            return modified_statement
        return line

    def _replace_data_engines(self, line, replace_engine="SharedMergeTree"):
        # These table engines are not supported
        data_engines = r"(Engine\s*=?\s*)(Log|TinyLog|StripeLog|Memory)\(?\)?"
        # Replace them with SMT
        replace_with = f"{replace_engine}() ORDER BY tuple()"
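        # E.g. "ENGINE = Memory" becomes "ENGINE = SharedMergeTree() ORDER BY tuple()".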
        if self.reference_file and not self.with_echo:
            replace_with = f"{replace_engine}\\nORDER BY tuple()\\nSETTINGS index_granularity = 8192"

        if (
            re.search(data_engines, line, flags=re.IGNORECASE)
            and "CREATE DATABASE" not in line.upper()
        ):
            modified_statement = re.sub(
                data_engines,
                lambda m: f"{m.group(1)}{replace_with}",
                line,
                flags=re.IGNORECASE,
            )
            return modified_statement
        return line

    def _replace_temporary_tables(self, line):
        # CREATE TEMPORARY TABLE is not supported; rewrite it to a regular CREATE TABLE
        create_stmt = r"CREATE\s*TEMPORARY\s*TABLE"
        if re.search(create_stmt, line, flags=re.IGNORECASE):
            modified_statement = re.sub(
                create_stmt,
                "CREATE TABLE",
                line,
                flags=re.IGNORECASE,
            )
            return modified_statement
        return line

    def _replace_database_engines(self, line):
        pattern = r"(CREATE DATABASE\s+(?:IF NOT EXISTS\s+)?(?:`[^`]*`|\S+))(\s*ENGINE\s*=\s*(Shared|Replicated|Atomic|Ordinary|Memory)(\(.*\))?)"
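        # Strips the engine clause: "CREATE DATABASE db ENGINE=Atomic" -> "CREATE DATABASE db".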
        if re.search(pattern, line, flags=re.IGNORECASE):
            modified_statement = re.sub(
                pattern,
                lambda m: m.group(1),
                line,
                flags=re.IGNORECASE,
            )
            return modified_statement
        return line

    def __init__(
        self,
        args,
        file_name,
        replace_replicated,
        replace_non_replicated,
        reference_file,
        cloud,
    ):
        self.args = args
        self.file_name = file_name
        self.replace_replicated = replace_replicated
        self.replace_non_replicated = replace_non_replicated
        self.reference_file = reference_file
        self.cloud = cloud
        self.with_echo = False
        use_random_path = not reference_file and not self._has_show_create_table()

        if not self._need_to_replace_something():
            return

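        # "{0}.{2}{1}" turns "name.sql" into "name.<pid>.sql"; the pid keeps
        # concurrent runs from clobbering each other's temp files.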
        self.temp_file_path = "{0}.{2}{1}".format(
            *os.path.splitext(file_name), os.getpid()
        )
        shutil.copy2(self.file_name, self.temp_file_path)

        with (
            open(self.file_name, "r", newline="", encoding="utf-8") as source,
            open(self.temp_file_path, "w", newline="", encoding="utf-8") as modified,
        ):
            for line in source:
                # Replace table engines if echo enabled to match changed reference
                if (
                    self.cloud or self.args.replace_log_memory_with_mergetree
                ) and re.match("-- ?{ ?echo.*", line):
                    self.with_echo = True

                if self.cloud and self.with_echo:
                    self.replace_replicated = True
                    self.replace_non_replicated = True

                if self.args.replace_log_memory_with_mergetree:
                    line = self._replace_data_engines(
                        line,
                        replace_engine="SharedMergeTree" if self.cloud else "MergeTree",
                    )

                if (
                    self._is_select_line(line)
                    or (reference_file and not self._is_create_query(line))
                ) and not self.cloud:
                    modified.write(line)
                    continue

                if self.replace_replicated:
                    for (
                        engine_from,
                        engine_to,
                    ) in self.ENGINES_MAPPING_REPLICATED:
                        if engine_from in line and (
                            self._check_replicated_new_syntax(line)
                            or engine_from + " " in line
                            or engine_from + ";" in line
                        ):
                            line = line.replace(engine_from, engine_to)
                            break

                if args.shared_catalog_stress:
                    line = self._replace_data_engines(line)
                    line = self._replace_database_engines(line)

                # Remove parameters (zk path and replica name) from SMT tables
                # because they are not supported in cloud
                if self.cloud:
                    line = self._remove_rmt_parameters(line)
                    line = self._replace_data_engines(line)
                    line = self._replace_database_engines(line)
                    # line = self._replace_temporary_tables(line)

                    # In cloud, we have only default cluster and cluster for each database. Test only default now
                    for keyword in self.CLUSTER_KEYWORDS:
                        line = line.replace(keyword, "default")

                    for prev, new in self.DISKS_MAP.items():
                        line = line.replace(prev, new)

                    for pattern, replacement in self.REGEX_REPLACE.items():
                        line = re.sub(pattern, replacement, line)

                if self.replace_non_replicated:
                    line = self._replace_non_replicated(
                        line, reference_file, use_random_path, add_path=not self.cloud
                    )

                modified.write(line)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        if not self._need_to_replace_something():
            return
        os.unlink(self.temp_file_path)

    def get_path(self):
        if not self._need_to_replace_something():
            return self.file_name
        return self.temp_file_path


def stringhash(s: str) -> int:
    # The built-in hash() is consistent only within a single process
    # invocation (https://stackoverflow.com/a/42089311), so use crc32 instead.
    return zlib.crc32(s.encode("utf-8"))


def read_file_as_binary_string(file_path):
    with open(file_path, "rb") as file:
        binary_data = file.read()
    return binary_data


# Keep only the first and last lines of the log if it is too long
def trim_for_log(s, limit=10000):
    if not s:
        return s
    lines = s.splitlines()
    if len(lines) > limit:
        separator = "-" * 40 + str(len(lines) - limit) + " lines are hidden" + "-" * 40
        return "\n".join(lines[: limit // 2] + [separator] + lines[-limit // 2 :])
    return "\n".join(lines)


def is_valid_utf_8(fname):
    try:
        with open(fname, "rb") as f:
            contents = f.read()
            contents.decode("utf-8")
            return True
    except UnicodeDecodeError:
        return False


class TestException(Exception):
    pass


class ConfigException(Exception):
    pass


class HTTPError(Exception):
    def __init__(self, message=None, code=None):
        self.message = message
        self.code = code
        super().__init__(message)

    def __str__(self):
        return f"Code: {self.code}. {self.message}"


# Helpers to execute queries via HTTP interface.
def clickhouse_execute_http(
    base_args,
    query,
    body=None,
    timeout=30,
    settings=None,
    default_format=None,
    max_http_retries=5,
    retry_error_codes=False,
):
    if base_args.secure:
        client = http.client.HTTPSConnection(
            host=base_args.tcp_host, port=base_args.http_port, timeout=timeout
        )
    else:
        client = http.client.HTTPConnection(
            host=base_args.tcp_host, port=base_args.http_port, timeout=timeout
        )

    timeout = int(timeout)
    params = {
        "query": query,
        # hung check in stress tests may remove the database,
        # hence we should use 'system'.
        "database": "system",
        "connect_timeout": timeout,
        "receive_timeout": timeout,
        "send_timeout": timeout,
        "http_connection_timeout": timeout,
        "http_receive_timeout": timeout,
        "http_send_timeout": timeout,
        "output_format_parallel_formatting": 0,
        "max_rows_to_read": 0,  # Some queries read from system.text_log which might get too big
    }
    if settings is not None:
        params.update(settings)
    if default_format is not None:
        params["default_format"] = default_format

    for i in range(max_http_retries):
        try:
            client.request(
                "POST",
                f"/?{base_args.client_options_query_str}{urllib.parse.urlencode(params)}",
                body=body,
            )
            res = client.getresponse()
            data = res.read()
            if res.status == 200 or (not retry_error_codes):
                break
        except Exception as ex:
            if i == max_http_retries - 1:
                raise ex
            client.close()
            # Cap sleep time to prevent unbounded delays (max 3 seconds per retry)
            sleep(min(i + 1, 3))

    if res.status != 200:
        raise HTTPError(data.decode(), res.status)

    return data


def clickhouse_execute(
    base_args,
    query,
    body=None,
    timeout=30,
    settings=None,
    max_http_retries=5,
    retry_error_codes=False,
):
    return clickhouse_execute_http(
        base_args,
        query,
        body,
        timeout,
        settings,
        max_http_retries=max_http_retries,
        retry_error_codes=retry_error_codes,
    ).strip()


def clickhouse_execute_json(
    base_args, query, timeout=60, settings=None, max_http_retries=5
):
    data = clickhouse_execute_http(
        base_args,
        query,
        None,
        timeout,
        settings,
        "JSONEachRow",
        max_http_retries=max_http_retries,
    )
    if not data:
        return None
    rows = []
    for row in data.strip().splitlines():
        rows.append(json.loads(row))
    return rows
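

# A minimal usage sketch of the helpers above (they assume `args` provides
# secure, tcp_host, http_port and client_options_query_str, as used in
# clickhouse_execute_http):
#
#   version = clickhouse_execute(args, "SELECT version()")
#   rows = clickhouse_execute_json(args, "SELECT name FROM system.databases")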


# Should we capture client's stacktraces via SIGTSTP
CAPTURE_CLIENT_STACKTRACE = False


def kill_process_group(pgid, fatal_log):
    try:
        print(f"Processes left in process group {pgid}:")
        print(
            subprocess.check_output(
                f"pgrep --pgroup {pgid} -a", shell=True, stderr=subprocess.STDOUT
            ).decode("utf-8"),
            end="",
        )

        if SANITIZED:
            print("Wait 60 seconds to let sanitizers print the report...")
            sleep(60)

        print(f"Killing process group {pgid}")
        print(f"Processes in process group {pgid}:")
        print(
            subprocess.check_output(
                f"pgrep --pgroup {pgid} -a", shell=True, stderr=subprocess.STDOUT
            ).decode("utf-8"),
            end="",
        )

        if CAPTURE_CLIENT_STACKTRACE:
            # Let's try to dump stacktrace in client (useful to catch issues there)
            os.killpg(pgid, signal.SIGTSTP)
            stacktrace_delay = 0.5 if RELEASE_NON_SANITIZED else 10
            print(f"Wait {stacktrace_delay} seconds for clickhouse utilities to handle SIGTSTP...")
            sleep(stacktrace_delay)

            # Check whether clickhouse already printed the stacktrace; if so, there is no need to do the same with gdb
            client_printed_stacktrace = False
            try:
                if fatal_log and os.path.exists(fatal_log):
                    with open(fatal_log, "rb") as f:
                        if (
                            b"This is a signal used for debugging purposes by the user"
                            in f.read()
                        ):
                            client_printed_stacktrace = True
            except Exception:
                print(f"Cannot read {fatal_log}. Ignoring.")

            if not client_printed_stacktrace:
                pgrep = subprocess.run(
                    f"pgrep --pgroup {pgid} clickhouse",
                    shell=True,
                    capture_output=True,
                    text=False,
                )
                processes = pgrep.stdout.decode("utf-8").strip().split("\n")
                processes = map(lambda x: x.strip(), processes)
                processes = filter(lambda x: len(x) > 0, processes)
                processes = list(map(lambda x: int(x.strip()), processes))
                for child in processes:
                    print(get_stacktraces_from_gdb(child))
        # NOTE: this may still leave some processes that were created by
        # timeout(1), since it also creates a new process group. But this
        # should not be a problem with default options, since the default
        # time for each test is 10min, which is way longer than the timeout
        # of each timeout(1) invocation.
        #
        # As a workaround we send SIGTERM first and only then SIGKILL, so
        # that timeout(1) has a chance to terminate its children (though
        # not always, since signals are asynchronous).
        os.killpg(pgid, signal.SIGTERM)
        # We need a minimal delay to let processes handle SIGTERM; 0.1s may
        # not be enough, but at least it is something.
        sleep(0.1)
        os.killpg(pgid, signal.SIGKILL)
    except OSError as e:
        if e.errno == ESRCH:
            print(f"Got ESRCH while killing {pgid}. Ignoring.")
        else:
            raise
    print(f"Process group {pgid} should be killed")


def cleanup_child_processes(pid):
    print(f"Child processes of {pid}:")
    try:
        pgid = os.getpgid(os.getpid())
    except OSError as e:
        if e.errno == ESRCH:
            print(f"Process {pid} does not exist. Ignoring.")
            return
        raise
    print(
        subprocess.check_output(
            f"pgrep --parent {pid} -a", shell=True, stderr=subprocess.STDOUT
        ).decode("utf-8"),
        end="",
    )
    # Due to start_new_session=True, it is not enough to kill by PGID; we need
    # to look at child processes as well.
    # But we hope that nobody creates a new session in the tests (it is
    # possible via timeout(1), but we assume those processes will be killed
    # by timeout(1) itself).
    pgrep = subprocess.check_output(
        f"pgrep --parent {pid}", shell=True, stderr=subprocess.STDOUT
    )
    proc_list = pgrep.decode("utf-8").strip().split("\n")
    processes = list(map(lambda x: int(x.strip()), proc_list))
    for child in processes:
        try:
            child_pgid = os.getpgid(child)
        except ProcessLookupError as e:
            print(e, f"No such process: {child}")
            continue

        if child_pgid != pgid:
            kill_process_group(child_pgid, None)

    # SIGKILL should not be sent, since this will kill the script itself
    os.killpg(pgid, signal.SIGTERM)


# send signal to all processes in group to avoid hung check triggering
# (to avoid terminating clickhouse-test itself, the signal should be ignored)
def stop_tests():
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    cleanup_child_processes(os.getpid())
    signal.signal(signal.SIGTERM, signal_handler)


def get_db_engine(args, database_name):
    if args.cloud or args.shared_catalog_stress:
        # use default database: shared catalog / replicated
        return ""
    if args.replicated_database:
        return f" ON CLUSTER test_cluster_database_replicated \
            ENGINE=Replicated('/test/clickhouse/db/{database_name}', \
            '{{shard}}', '{{replica}}')"
    if args.db_engine:
        return " ENGINE=" + args.db_engine
    if args.shared_catalog:
        return " ENGINE=Shared"
    return ""  # Will use default engine


def get_create_database_settings(args, testcase_args):
    create_database_settings = {}
    if testcase_args:
        create_database_settings["log_comment"] = testcase_args.testcase_basename
    if args.db_engine == "Ordinary":
        create_database_settings["allow_deprecated_database_ordinary"] = 1
    return create_database_settings


def get_zookeeper_session_uptime(args):
    try:
        if args.replicated_database:
            return int(
                clickhouse_execute(
                    args,
                    """
            SELECT min(materialize(zookeeperSessionUptime()))
            FROM clusterAllReplicas('test_cluster_database_replicated', system.one)
            """,
                )
            )
        if args.shared_catalog:
            return int(
                clickhouse_execute(
                    args,
                    """
            SELECT min(materialize(zookeeperSessionUptime()))
            FROM clusterAllReplicas('test_cluster_shared_catalog', system.one)
            """,
                )
            )
        return int(clickhouse_execute(args, "SELECT zookeeperSessionUptime()"))
    except Exception:
        return None


def need_retry(args, stdout, stderr, total_time):
    if args.check_zookeeper_session:
        # Sometimes we may get an unexpected exception like "Replica is readonly" or "Shutdown is called for table"
        # instead of "Session expired" or "Connection loss".
        # Retry if the session expired during test execution.
        # If ZooKeeper is configured, this is more reliable than checking stderr,
        # but the following condition is always true if ZooKeeper is not configured.
        session_uptime = get_zookeeper_session_uptime(args)
        if session_uptime is not None and session_uptime < math.ceil(total_time):
            return True
    return any(msg in stdout for msg in MESSAGES_TO_RETRY) or any(
        msg in stderr for msg in MESSAGES_TO_RETRY
    )


def get_processlist_size(args):
    if args.replicated_database:
        return int(
            clickhouse_execute(
                args,
                """
                SELECT
                    count()
                FROM clusterAllReplicas('test_cluster_database_replicated', system.processes)
                WHERE query NOT LIKE '%system.processes%'
                AND query NOT LIKE '%system.minio_%_logs%'
                """,
            ).strip()
        )
    if args.shared_catalog:
        return int(
            clickhouse_execute(
                args,
                """
                SELECT
                    count()
                FROM clusterAllReplicas('test_cluster_shared_catalog', system.processes)
                WHERE query NOT LIKE '%system.processes%'
                AND query NOT LIKE '%system.minio_%_logs%'
                """,
            ).strip()
        )
    return int(
        clickhouse_execute(
            args,
            """
                SELECT
                    count()
                FROM system.processes
                WHERE query NOT LIKE '%system.processes%'
                AND query NOT LIKE '%system.minio_%_logs%'
            """,
        ).strip()
    )


def get_processlist_with_stacktraces(args):
    if args.replicated_database:
        return clickhouse_execute(
            args,
            """
        SELECT materialize(hostName() || '::' || tcpPort()::String) as host_port, *
        -- NOTE: view() here to do JOIN on shards, instead of initiator
        FROM clusterAllReplicas('test_cluster_database_replicated', view(
            SELECT
                p.*,
                arrayStringConcat(groupArray('Thread ID ' || toString(s.thread_id) || '\n' || arrayStringConcat(arrayMap(
                    x -> concat(addressToLine(x), '::', demangle(addressToSymbol(x))),
                    s.trace), '\n') AS stacktrace
                )) AS stacktraces
            FROM system.processes p
            JOIN system.stack_trace s USING (query_id)
            WHERE query NOT LIKE '%system.processes%'
            AND query NOT LIKE '%system.minio_%_logs%'
            GROUP BY p.*
        ))
        ORDER BY elapsed DESC FORMAT Vertical
        """,
            settings={
                "allow_introspection_functions": 1,
            },
            timeout=120,
        )
    if args.shared_catalog:
        return clickhouse_execute(
            args,
            """
        SELECT materialize(hostName() || '::' || tcpPort()::String) as host_port, *
        -- NOTE: view() here to do JOIN on shards, instead of initiator
        FROM clusterAllReplicas('test_cluster_shared_catalog', view(
            SELECT
                p.*,
                arrayStringConcat(groupArray('Thread ID ' || toString(s.thread_id) || '\n' || arrayStringConcat(arrayMap(
                    x -> concat(addressToLine(x), '::', demangle(addressToSymbol(x))),
                    s.trace), '\n') AS stacktrace
                )) AS stacktraces
            FROM system.processes p
            JOIN system.stack_trace s USING (query_id)
            WHERE query NOT LIKE '%system.processes%'
            AND query NOT LIKE '%system.minio_%_logs%'
            GROUP BY p.*
        ))
        ORDER BY elapsed DESC FORMAT Vertical
        """,
            settings={
                "allow_introspection_functions": 1,
            },
            timeout=120,
        )
    return clickhouse_execute(
        args,
        """
        SELECT
            p.*,
            arrayStringConcat(groupArray('Thread ID ' || toString(s.thread_id) || '\n' || arrayStringConcat(arrayMap(
                x -> concat(addressToLine(x), '::', demangle(addressToSymbol(x))),
                s.trace), '\n') AS stacktrace
            )) AS stacktraces
        FROM system.processes p
        JOIN system.stack_trace s USING (query_id)
        WHERE query NOT LIKE '%system.processes%'
        AND query NOT LIKE '%system.minio_%_logs%'
        GROUP BY p.*
        ORDER BY elapsed DESC FORMAT Vertical
        """,
        settings={
            "allow_introspection_functions": 1,
        },
        timeout=120,
    )


def get_transactions_list(args):
    try:
        if args.replicated_database:
            return clickhouse_execute_json(
                args,
                "SELECT materialize((hostName(), tcpPort())) as host, * FROM "
                "clusterAllReplicas('test_cluster_database_replicated', system.transactions)",
            )
        if args.shared_catalog:
            return clickhouse_execute_json(
                args,
                "SELECT materialize((hostName(), tcpPort())) as host, * FROM "
                "clusterAllReplicas('test_cluster_shared_catalog', system.transactions)",
            )
        return clickhouse_execute_json(args, "select * from system.transactions")
    except Exception as e:
        return f"Cannot get list of transactions: {e}"


# collect server stacktraces using gdb
def get_stacktraces_from_gdb(pid):
    try:
        cmd = f"gdb -batch -ex 'thread apply all backtrace' -p {pid}"
        return subprocess.check_output(cmd, shell=True).decode("utf-8")
    except Exception as e:
        print(f"Error occurred while receiving stack traces from gdb (for {pid}): {e}")
        return None


# collect server stacktraces from system.stack_trace table
def get_stacktraces_from_clickhouse(args):
    settings_str = " ".join(
        [
            get_additional_client_options(args),
            "--allow_introspection_functions=1",
            "--skip_unavailable_shards=1",
        ]
    )

    msg = ""

    if args.replicated_database:
        msg = (
            f"{args.client} {settings_str} --query "
            '"SELECT materialize((hostName(), tcpPort())) as host, thread_name, thread_id, query_id, trace, '
            "arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), "
            "arrayMap(x -> addressToLine(x), trace), "
            "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace_str "
            "FROM clusterAllReplicas('test_cluster_database_replicated', 'system.stack_trace') "
            'ORDER BY host, thread_id FORMAT Vertical"'
        )
    elif args.shared_catalog:
        msg = (
            f"{args.client} {settings_str} --query "
            '"SELECT materialize((hostName(), tcpPort())) as host, thread_name, thread_id, query_id, trace, '
            "arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), "
            "arrayMap(x -> addressToLine(x), trace), "
            "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace_str "
            "FROM clusterAllReplicas('test_cluster_shared_catalog', 'system.stack_trace') "
            'ORDER BY host, thread_id FORMAT Vertical"'
        )
    else:
        msg = (
            f"{args.client} {settings_str} --query "
            "\"SELECT thread_name, thread_id, query_id, trace, arrayStringConcat(arrayMap(x, y -> concat(x, ': ', y), "
            "arrayMap(x -> addressToLine(x), trace), "
            "arrayMap(x -> demangle(addressToSymbol(x)), trace)), '\n') as trace_str "
            'FROM system.stack_trace FORMAT Vertical"'
        )

    try:
        return subprocess.check_output(
            msg,
            shell=True,
            stderr=subprocess.STDOUT,
        ).decode("utf-8")
    except Exception as e:
        print(f"Error occurred while receiving stack traces from client: {e}")
        return None


def get_all_server_pids():
    """Get PIDs of clickhouse-server processes belonging to the current process group.

    Scoping to the process group avoids attaching gdb to unrelated servers on
    shared runners where multiple test jobs may run concurrently.
    """
    try:
        pgid = os.getpgrp()
        output = subprocess.check_output(
            f"pgrep -g {pgid} -x clickhouse-server",
            shell=True,
            stderr=subprocess.DEVNULL,
            universal_newlines=True,
        ).strip()
        if output:
            return [int(p) for p in output.split()]
    except Exception:
        pass
    return []


def is_asan_build():
    """Check if the server is an ASan build; attaching gdb disables LeakSanitizer leak detection."""
    try:
        result = clickhouse_execute(
            args,
            "SELECT count() FROM system.build_options WHERE name = 'CXX_FLAGS' AND position('sanitize=address' IN value)",
        )
        return result != b"0"
    except Exception:
        # If we can't query the server, check environment variable as fallback.
        return bool(os.environ.get("ASAN_OPTIONS"))


def print_stacktraces() -> None:
    server_pid = get_server_pid()

    bt = None

    # Collect gdb stacktraces from all server processes (main + replicas).
    # Skip gdb under ASan, since it disables LeakSanitizer leak detection.
    if not is_asan_build():
        all_pids = get_all_server_pids()
        if all_pids:
            parts = []
            for pid in all_pids:
                label = "main server" if pid == server_pid else "replica"
                print(f"\nCollecting stacktraces from {label} process {pid} with gdb:")
                one_bt = get_stacktraces_from_gdb(pid)
                if one_bt and len(one_bt) >= 1000:
                    parts.append(f"=== PID {pid} ({label}) ===\n{one_bt}")
                else:
                    print(f"Got suspiciously small stacktraces from {pid}: {one_bt}")
            if parts:
                bt = "\n\n".join(parts)

    if bt is None:
        print("\nCollecting stacktraces from the system.stack_trace table:")

        bt = get_stacktraces_from_clickhouse(args)

    if bt is not None:
        print(bt)
        return

    print(
        colored(
            f"\nUnable to locate ClickHouse server process listening at TCP port "
            f"{args.tcp_port}. It must have crashed or exited prematurely!",
            args,
            "red",
            attrs=["bold"],
        )
    )


def get_server_pid():
    # lsof does not work in stress tests for some reason
    cmd_lsof = f"lsof -i tcp:{args.tcp_port} -s tcp:LISTEN -Fp | sed 's/^p//p;d'"
    cmd_pidof = "pidof -s clickhouse-server"

    commands = [cmd_lsof, cmd_pidof]
    output = None

    for cmd in commands:
        try:
            output = subprocess.check_output(
                cmd, shell=True, stderr=subprocess.STDOUT, universal_newlines=True
            )
            if output:
                return int(output)
        except Exception as e:
            print(f"Cannot get server pid with {cmd}, got {output}: {e}")

    return None  # most likely server is dead


def colored(text, args, color=None, on_color=None, attrs=None):
    if termcolor and (sys.stdout.isatty() or args.force_color):
        return termcolor.colored(text, color, on_color, attrs)
    return text


class TestStatus(enum.Enum):
    FAIL = "FAIL"
    UNKNOWN = "UNKNOWN"
    OK = "OK"
    SKIPPED = "SKIPPED"
    NOT_FAILED = "NOT_FAILED"


class FailureReason(enum.Enum):
    # FAIL reasons
    TIMEOUT = "Timeout!"
    SERVER_DIED = "server died"
    EXIT_CODE = "return code: "
    STDERR = "having stderr: "
    EXCEPTION = "having exception in stdout: "
    RESULT_DIFF = "result differs with reference: "
    TOO_LONG = (
        f"Test runs too long (> {TEST_MAX_RUN_TIME_IN_SECONDS}s). Make it faster."
    )
    INTERNAL_QUERY_FAIL = "Internal query (CREATE/DROP DATABASE) failed:"

    # SKIPPED reasons
    NOT_SUPPORTED = "not supported"
    NOT_SUPPORTED_IN_CLOUD = "not supported in cloud"
    NOT_SUPPORTED_IN_CLOUD_PREFILTER = "not supported in cloud. prefilter"
    NOT_SUPPORTED_IN_CLOUD_POSTFILTER = "not supported in cloud. postfilter"
    NOT_SUPPORTED_IN_PRIVATE = "not supported in private build"
    DISABLED = "disabled"
    SKIP = "skip"
    NO_JINJA = "no jinja"
    NO_ZOOKEEPER = "no zookeeper"
    NO_SHARD = "no shard"
    FAST_ONLY = "running fast tests only"
    NOT_FAST_ONLY = "running not only fast tests"
    NO_LONG = "not running long tests"
    REPLICATED_DB = "replicated-database"
    NON_ATOMIC_DB = "database engine not Atomic"
    OBJECT_STORAGE = "object-storage"
    COVERAGE = "coverage"
    S3_STORAGE = "s3-storage"
    AZURE_BLOB_STORAGE = "azure-blob-storage"
    BUILD = "not running for current build"
    NO_PARALLEL_REPLICAS = "something is not supported with parallel replicas"
    NO_ASYNC_INSERT = "test is not supported with async inserts enabled"
    NO_LLVM_COVERAGE = "test is not supported with llvm coverage enabled"
    SHARED_MERGE_TREE = "no-shared-merge-tree"
    REQUIRE_REPLICATED_TO_SHARED_MERGE_TREE_REPLACEMENT = (
        "Tests with shared_merge_tree in the name require the "
        "--replace-replicated-with-shared option of clickhouse-test"
    )
    SHARED_CATALOG = "no-shared-catalog"
    SHARED_CATALOG_ONLY = "shared-catalog"
    DISTRIBUTED_CACHE = "distributed-cache"
    NO_SHARED_OR_REPLICATED_MERGE_TREE = "no-shared-or-replicated-merge-tree"
    NO_STATELESS = "no-stateless"
    NO_STATEFUL = "no-stateful"
    NO_OPENSSL_FIPS = "no-openssl-fips"
    ENCRYPTED_STORAGE = "encrypted-storage"

    # UNKNOWN reasons
    NO_REFERENCE = "no reference file"
    INTERNAL_ERROR = "Test internal error: "
    NOT_FAILED = (
        "The test succeeded, but it is listed in parallel_replicas_blacklist.txt or async_insert_blacklist.txt. "
        "Please remove it from the list"
    )


def threshold_generator(always_on_prob, always_off_prob, min_val, max_val):
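    """Return a generator that yields min_val with probability always_on_prob,
    max_val with probability always_off_prob, and otherwise a uniform random
    value in [min_val, max_val] (randint for ints, uniform otherwise)."""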
    def gen():
        tmp = random.random()
        if tmp <= always_on_prob:
            return min_val
        if tmp <= always_on_prob + always_off_prob:
            return max_val

        if isinstance(min_val, int) and isinstance(max_val, int):
            return random.randint(min_val, max_val)
        return random.uniform(min_val, max_val)

    return gen


def auto_statistics_generator():
    def gen():
        if random.random() < 0.3:
            return ""

        all_statistic_types = ["minmax", "uniq", "tdigest", "countmin"]
        return ",".join(
            random.sample(
                all_statistic_types, random.randint(0, len(all_statistic_types))
            )
        )

    return gen


def randomize_external_sort_group_by():
    """Returns a dict of 4 settings for external sort/group by.
    The Bytes-vs-Ratio choice is made per call (not per process),
    so each test run gets an independent choice."""
    if random.choice(["Bytes", "Ratio"]) == "Bytes":
        generator = threshold_generator(0.3, 0.5, 0, 10 * 1024 * 1024 * 1024)
        return {
            "max_bytes_before_external_sort": generator(),
            "max_bytes_before_external_group_by": generator(),
            "max_bytes_ratio_before_external_sort": 0,
            "max_bytes_ratio_before_external_group_by": 0,
        }

    generator = threshold_generator(0.3, 0.5, 0.0, 0.20)
    return {
        "max_bytes_before_external_sort": 0,
        "max_bytes_before_external_group_by": 0,
        "max_bytes_ratio_before_external_sort": round(generator(), 2),
        "max_bytes_ratio_before_external_group_by": round(generator(), 2),
    }


# To keep the dependency list as short as possible, tzdata is not used here
# (to avoid a try/except block around the import).
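# E.g. /etc/localtime -> /usr/share/zoneinfo/Europe/Berlin yields "Europe/Berlin".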
def get_localzone():
    return os.getenv("TZ", "/".join(os.readlink("/etc/localtime").split("/")[-2:]))


# Refer to `tests/integration/helpers/random_settings.py` for integration test random settings
class SettingsRandomizer:
    settings = {
        "max_insert_threads": lambda: (
            12 if random.random() < 0.03 else random.randint(1, 3)
        ),
        "group_by_two_level_threshold": threshold_generator(0.2, 0.2, 1, 1000000),
        "group_by_two_level_threshold_bytes": threshold_generator(
            0.2, 0.2, 1, 50000000
        ),
        "distributed_aggregation_memory_efficient": lambda: random.randint(0, 1),
        "fsync_metadata": lambda: random.randint(0, 1),
        "output_format_parallel_formatting": lambda: random.randint(0, 1),
        "input_format_parallel_parsing": lambda: random.randint(0, 1),
        "min_chunk_bytes_for_parallel_parsing": lambda: max(
            1024, int(random.gauss(10 * 1024 * 1024, 5 * 1000 * 1000))
        ),
        "max_read_buffer_size": lambda: random.randint(500000, 1048576),
        "prefer_localhost_replica": lambda: random.randint(0, 1),
        "max_block_size": lambda: random.randint(8000, 100000),
        "max_joined_block_size_rows": lambda: random.randint(8000, 100000),
        "joined_block_split_single_row": lambda: random.randint(0, 1),
        "join_output_by_rowlist_perkey_rows_threshold": lambda: random.choices([0, 1, 1_000_000, random.randint(0, 1000)], weights=[1, 1, 1, 10])[0],
        "max_threads": lambda: 32 if random.random() < 0.03 else random.randint(1, 3),
        "optimize_append_index": lambda: random.randint(0, 1),
        "use_hedged_requests": lambda: random.randint(0, 1),
        "optimize_if_chain_to_multiif": lambda: random.randint(0, 1),
        "optimize_if_transform_strings_to_enum": lambda: random.randint(0, 1),
        "optimize_read_in_order": lambda: random.randint(0, 1),
        "optimize_or_like_chain": lambda: random.randint(0, 1),
        "optimize_substitute_columns": lambda: random.randint(0, 1),
        "enable_multiple_prewhere_read_steps": lambda: random.randint(0, 1),
        "read_in_order_two_level_merge_threshold": lambda: random.randint(0, 100),
        "optimize_aggregation_in_order": lambda: random.randint(0, 1),
        "aggregation_in_order_max_block_bytes": lambda: random.randint(0, 50000000),
        "use_uncompressed_cache": lambda: random.randint(0, 1),
        "min_bytes_to_use_direct_io": threshold_generator(
            0.2, 0.5, 1, 10 * 1024 * 1024 * 1024
        ),
        "min_bytes_to_use_mmap_io": threshold_generator(
            0.2, 0.5, 1, 10 * 1024 * 1024 * 1024
        ),
        "local_filesystem_read_method": lambda: random.choice(
            ["read", "pread", "mmap", "pread_threadpool"]
        ),
        "remote_filesystem_read_method": lambda: random.choice(["read", "threadpool"]),
        "local_filesystem_read_prefetch": lambda: random.randint(0, 1),
        "filesystem_cache_segments_batch_size": lambda: random.choice(
            [0, 1, 2, 3, 5, 10, 50, 100]
        ),
        "read_from_filesystem_cache_if_exists_otherwise_bypass_cache": lambda: random.randint(
            0, 1
        ),
        "throw_on_error_from_cache_on_write_operations": lambda: random.randint(0, 1),
        "remote_filesystem_read_prefetch": lambda: random.randint(0, 1),
        "distributed_cache_discard_connection_if_unread_data": lambda: random.randint(
            0, 1
        ),
        "distributed_cache_use_clients_cache_for_write": lambda: random.randint(
            0, 1
        ),
        "distributed_cache_use_clients_cache_for_read": lambda: random.randint(
            0, 1
        ),
        "allow_prefetched_read_pool_for_remote_filesystem": lambda: random.randint(
            0, 1
        ),
        "filesystem_prefetch_max_memory_usage": lambda: random.choice(
            ["32Mi", "64Mi", "128Mi"]
        ),
        "filesystem_prefetches_limit": lambda: random.choice(
            [0, 10]
        ),  # 0 means unlimited (but still limited by filesystem_prefetch_max_memory_usage)
        "filesystem_prefetch_min_bytes_for_single_read_task": lambda: random.choice(
            ["1Mi", "8Mi", "16Mi"]
        ),
        "filesystem_prefetch_step_marks": lambda: random.choice(
            [0, 50]
        ),  # 0 means 'auto'
        "filesystem_prefetch_step_bytes": lambda: random.choice(
            [0, "100Mi"]
        ),  # 0 means 'auto'
        "enable_filesystem_cache": lambda: random.randint(0, 1),
        "enable_filesystem_cache_on_write_operations": lambda: random.randint(0, 1),
        "compile_expressions": lambda: random.randint(0, 1),
        "compile_aggregate_expressions": lambda: random.randint(0, 1),
        "compile_sort_description": lambda: random.randint(0, 1),
        "merge_tree_coarse_index_granularity": lambda: random.randint(2, 32),
        "optimize_distinct_in_order": lambda: random.randint(0, 1),
        "max_bytes_before_remerge_sort": lambda: random.randint(1, 3000000000),
        "min_compress_block_size": lambda: random.randint(1, 1048576 * 3),
        "max_compress_block_size": lambda: random.randint(1, 1048576 * 3),
        "merge_tree_compact_parts_min_granules_to_multibuffer_read": lambda: random.randint(
            1, 128
        ),
        "optimize_sorting_by_input_stream_properties": lambda: random.randint(0, 1),
        "http_response_buffer_size": lambda: random.randint(0, 10 * 1048576),
        "http_wait_end_of_query": lambda: random.random() > 0.5,
        "enable_memory_bound_merging_of_aggregation_results": lambda: random.randint(
            0, 1
        ),
        "min_count_to_compile_expression": lambda: random.choice([0, 3]),
        "min_count_to_compile_aggregate_expression": lambda: random.choice([0, 3]),
        "min_count_to_compile_sort_description": lambda: random.choice([0, 3]),
        "session_timezone": lambda: random.choice(
            [
                # special non-deterministic around 1970 timezone, see [1].
                #
                #   [1]: https://github.com/ClickHouse/ClickHouse/issues/42653
                "America/Mazatlan",
                "America/Hermosillo",
                "Mexico/BajaSur",
                # These timezones had DST transitions on some unusual dates (e.g. 2000-01-15 12:00:00).
                "Africa/Khartoum",
                "Africa/Juba",
                # server default that is randomized across all timezones
                # NOTE: due to lots of trickery we cannot use empty timezone here, but this should be the same.
                get_localzone(),
            ]
        ),
        # This setting affects part names and their content, which can be read from tables in tests.
        # We have a lot of tests which rely on part names, so it's very unsafe to enable randomization
        # of this setting.
        # "prefer_warmed_unmerged_parts_seconds": lambda: random.randint(0, 10),
        "use_page_cache_for_disks_without_file_cache": lambda: random.random() < 0.7,
        "use_page_cache_for_local_disks": lambda: random.random() < 0.3,
        "use_page_cache_for_object_storage": lambda: random.random() < 0.5,
        "page_cache_inject_eviction": lambda: random.random() < 0.5,
        "merge_tree_read_split_ranges_into_intersecting_and_non_intersecting_injection_probability": lambda: round(
            random.random(), 2
        ),
        "prefer_external_sort_block_bytes": lambda: random.choice([0, 1, 100000000]),
        "cross_join_min_rows_to_compress": lambda: random.choice([0, 1, 100000000]),
        "cross_join_min_bytes_to_compress": lambda: random.choice([0, 1, 100000000]),
        "min_external_table_block_size_bytes": lambda: random.choice([0, 1, 100000000]),
        "max_parsing_threads": lambda: random.choice([0, 1, 10]),
        "optimize_functions_to_subcolumns": lambda: random.randint(0, 1),
        "parallel_replicas_local_plan": lambda: random.randint(0, 1),
        # true forces the swap, which is harmful for some tests
        "query_plan_join_swap_table": lambda: random.choice(["auto", "false"]),
        "enable_vertical_final": lambda: random.randint(0, 1),
        "optimize_extract_common_expressions": lambda: random.randint(0, 1),
        "optimize_syntax_fuse_functions": lambda: random.randint(0, 1),
        "use_async_executor_for_materialized_views": lambda: random.randint(0, 1),
        "use_query_condition_cache": lambda: random.randint(0, 1),
        "secondary_indices_enable_bulk_filtering": lambda: random.randint(0, 1),
        "use_skip_indexes_if_final": lambda: random.randint(0, 1),
        "use_skip_indexes_on_data_read": lambda: random.randint(0, 1),
        "optimize_rewrite_like_perfect_affix": lambda: random.randint(0, 1),
        # Use the new reader most of the time.
        "input_format_parquet_use_native_reader_v3": lambda: min(1, random.randint(0, 5)),
        "enable_lazy_columns_replication": lambda: random.randint(0, 1),
        "allow_special_serialization_kinds_in_output_formats": lambda: random.randint(0, 1),
        "query_plan_read_in_order_through_join": lambda: random.randint(0, 1),
        "read_in_order_use_virtual_row": lambda: random.randint(0, 1),
        "short_circuit_function_evaluation_for_nulls_threshold": lambda: random.random() * 1.1,
        "automatic_parallel_replicas_mode": lambda: 2 if random.random() < 0.25 else 0,
        "temporary_files_buffer_size": lambda: random.randint(800 * 1024, 1200 * 1024),
        # 'dpsize' implements the DPsize algorithm, currently only for INNER joins,
        # so it may not work in some tests. That is why we use it with a fallback to 'greedy'.
        "query_plan_optimize_join_order_algorithm": lambda: random.choice(
            ["greedy", "dpsize,greedy", "greedy,dpsize"]
        ),
    }

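    # Settings whose randomized value must mirror another setting's randomized value.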
    dependent_settings = {
        "use_skip_indexes_if_final_exact_mode": "use_skip_indexes_if_final",
    }

    @staticmethod
    def adjust_settings_for_autopr(args, tags, random_settings):
        # no_parallel_replicas is set for runs with parallel replicas enabled; in this case we should not reset `cluster_for_parallel_replicas`
        if (
            not args.no_parallel_replicas
            and "no-parallel-replicas" not in tags
            and random_settings["automatic_parallel_replicas_mode"] == 2
        ):
            # parallel replicas won't actually be used: since `automatic_parallel_replicas_mode` is 2 we will only collect statistics
            random_settings["enable_parallel_replicas"] = 1
            # seems to throw us over the memory limit in flaky checks with sanitizers
            random_settings["parallel_distributed_insert_select"] = 0
            random_settings["cluster_for_parallel_replicas"] = "parallel_replicas"
            random_settings["parallel_replicas_local_plan"] = 1
            random_settings["parallel_replicas_for_non_replicated_merge_tree"] = 1

    @staticmethod
    def get_random_settings(args, tags):
        random_settings = {}
        is_debug = BuildFlags.DEBUG in args.build_flags
        for setting, generator in SettingsRandomizer.settings.items():
            if (
                is_debug
                and setting == "allow_prefetched_read_pool_for_remote_filesystem"
            ):
                random_settings[setting] = 0
            else:
                random_settings[setting] = generator()
        # Bytes-vs-Ratio choice is made per test, not per process
        random_settings.update(randomize_external_sort_group_by())
        for setting, other_setting in SettingsRandomizer.dependent_settings.items():
            random_settings[setting] = random_settings[other_setting]
        SettingsRandomizer.adjust_settings_for_autopr(args, tags, random_settings)
        return random_settings


class MergeTreeSettingsRandomizer:
    settings = {
        "ratio_of_defaults_for_sparse_serialization": threshold_generator(
            0.3, 0.5, 0.0, 1.0
        ),
        "prefer_fetch_merged_part_size_threshold": threshold_generator(
            0.2, 0.5, 1, 10 * 1024 * 1024 * 1024
        ),
        "vertical_merge_algorithm_min_rows_to_activate": threshold_generator(
            0.4, 0.4, 1, 1000000
        ),
        "vertical_merge_algorithm_min_columns_to_activate": threshold_generator(
            0.4, 0.4, 1, 100
        ),
        "allow_vertical_merges_from_compact_to_wide_parts": lambda: random.randint(
            0, 1
        ),
        "min_merge_bytes_to_use_direct_io": threshold_generator(
            0.25, 0.25, 1, 10 * 1024 * 1024 * 1024
        ),
        "index_granularity_bytes": lambda: random.randint(1024, 30 * 1024 * 1024),
        "merge_max_block_size": lambda: random.randint(1, 8192 * 3),
        "index_granularity": lambda: random.randint(1, 65536),
        "min_bytes_for_wide_part": threshold_generator(0.3, 0.3, 0, 1024 * 1024 * 1024),
        "compress_marks": lambda: random.randint(0, 1),
        "compress_primary_key": lambda: random.randint(0, 1),
        "marks_compress_block_size": lambda: random.randint(8000, 100000),
        "primary_key_compress_block_size": lambda: random.randint(8000, 100000),
        "replace_long_file_name_to_hash": lambda: random.randint(0, 1),
        "max_file_name_length": threshold_generator(0.3, 0.3, 0, 128),
        "min_bytes_for_full_part_storage": threshold_generator(
            0.3, 0.3, 0, 512 * 1024 * 1024
        ),
        "compact_parts_max_bytes_to_buffer": lambda: random.randint(
            1024, 512 * 1024 * 1024
        ),
        "compact_parts_max_granules_to_buffer": threshold_generator(0.15, 0.15, 1, 256),
        "compact_parts_merge_max_bytes_to_prefetch_part": lambda: random.randint(
            1, 32 * 1024 * 1024
        ),
        "cache_populated_by_fetch": lambda: random.randint(0, 1),
        "concurrent_part_removal_threshold": threshold_generator(0.2, 0.3, 0, 100),
        "old_parts_lifetime": threshold_generator(0.2, 0.3, 10, 8 * 60),
        "prewarm_mark_cache": lambda: random.randint(0, 1),
        "use_const_adaptive_granularity": lambda: random.randint(0, 1),
        "enable_index_granularity_compression": lambda: random.randint(0, 1),
        "enable_block_number_column": lambda: random.randint(0, 1),
        "enable_block_offset_column": lambda: random.randint(0, 1),
        "use_primary_key_cache": lambda: random.randint(0, 1),
        "prewarm_primary_key_cache": lambda: random.randint(0, 1),
        "object_serialization_version": lambda: random.choice(["v2", "v3"]),
        "object_shared_data_serialization_version": lambda: random.choice(
            ["map", "map_with_buckets", "advanced"]),
        "object_shared_data_serialization_version_for_zero_level_parts": lambda: random.choice(
            ["map", "map_with_buckets", "advanced"]),
        "object_shared_data_buckets_for_compact_part": lambda: random.randint(1, 32),
        "object_shared_data_buckets_for_wide_part": lambda: random.randint(1, 32),
        "dynamic_serialization_version": lambda: random.choice(["v2", "v3"]),
        "auto_statistics_types": auto_statistics_generator(),
        "serialization_info_version": lambda: random.choice(["basic", "with_types"]),
        "string_serialization_version": lambda: random.choice(["single_stream", "with_size_stream"]),
        "nullable_serialization_version": lambda: random.choice(["basic", "allow_sparse"]),
        "enable_shared_storage_snapshot_in_query": lambda: random.randint(0, 1),
        "min_columns_to_activate_adaptive_write_buffer": lambda: random.randint(0, 1000),
        "reduce_blocking_parts_sleep_ms": lambda: random.randint(200, 2000),
        "shared_merge_tree_outdated_parts_group_size": lambda: random.randint(1, 2),
        "shared_merge_tree_max_outdated_parts_to_process_at_once": lambda: random.randint(1, 10),
        "map_serialization_version": lambda: random.choice(["basic", "with_buckets"]),
        "map_serialization_version_for_zero_level_parts": lambda: random.choice(["basic", "with_buckets"]),
        "max_buckets_in_map": lambda: random.randint(1, 100),
        "map_buckets_strategy": lambda: random.choice(["constant", "sqrt", "linear"]),
        "map_buckets_min_avg_size": lambda: random.randint(1, 1000),
    }

    @staticmethod
    def get_random_settings(args):
        random_settings = {}
        for setting, generator in MergeTreeSettingsRandomizer.settings.items():
            if setting in args.changed_merge_tree_settings:
                continue

            value = generator()
            if isinstance(value, str) and value == "":
                continue

            random_settings[setting] = value

        return random_settings
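    # Illustrative notes on get_random_settings() above: settings already pinned
    # via args.changed_merge_tree_settings are never randomized, and a generator
    # returning an empty string is treated as "do not set this setting".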


def replace_in_file(filename, what, with_what):
    with open(filename, "rb") as f:
        data = f.read()
    with open(filename, "wb") as f:
        f.write(data.replace(what.encode(), with_what.encode()))


def replace_in_file_re(filename, what, with_what):
    # NOTE: `what` and `with_what` are raw sed expressions, so they must not
    # contain unescaped '/' or "'"; the filename is quoted so that paths with
    # spaces or shell metacharacters survive.
    os.system(f"LC_ALL=C sed -i -e 's/{what}/{with_what}/' {shlex.quote(filename)}")


def str_to_numeric(v: str):
    vl = v.lower()
    if vl in ("true", "false"):
        return vl == "true"
    try:
        if "." in v:
            return float(v)
        return int(v)
    except ValueError:
        return v
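# Illustrative conversions performed by str_to_numeric():
#   "true"   -> True        "false" -> False
#   "1"      -> 1           "0.5"   -> 0.5
#   "native" -> "native"    (unparsable strings pass through unchanged)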


def str_to_bool(v):
    if isinstance(v, str):
        return bool(str_to_numeric(v))
    return bool(v)


def parse_settings_cli_blob(blob: str) -> Dict[str, object]:
    """
    Accepts strings like:
      "--max_threads 1 --fsync_metadata 1 --http_wait_end_of_query True"
    Returns dict: {"max_threads": 1, "fsync_metadata": 1, "http_wait_end_of_query": True}
    """
    if not blob:
        return {}
    tokens = shlex.split(blob)
    out: Dict[str, object] = {}
    i = 0
    while i < len(tokens):
        key = tokens[i].lstrip("-")
        if not key:
            i += 1
            continue
        # value may be missing → treat as boolean flag
        if i + 1 >= len(tokens) or tokens[i + 1].startswith("--"):
            out[key] = True
            i += 1
            continue
        out[key] = str_to_numeric(tokens[i + 1])
        i += 2
    return out
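# Illustrative edge case of parse_settings_cli_blob(): a flag with no following
# value (or one followed by another "--" token) becomes a boolean True:
#   parse_settings_cli_blob("--fsync_metadata 0 --http_wait_end_of_query")
#   -> {"fsync_metadata": 0, "http_wait_end_of_query": True}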


def load_settings_from_file(path: str, *, kind: str) -> Dict[str, object]:
    """
    File must contain ONLY the inner blob (space-separated `--k v` pairs),
    NOT the outer `--settings "..."` or `--merge-tree-settings "..."`.
    """
    with open(path, "r", encoding="utf-8") as f:
        content = f.read().strip()
    bad_prefixes = ("--settings", "--merge-tree-settings")
    for p in bad_prefixes:
        if content.startswith(p):
            raise ValueError(
                f"{kind} file '{path}' must NOT include the leading '{p}'; "
                "put only the inner space-separated --key value pairs."
            )
    return parse_settings_cli_blob(content)
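# Example of a file accepted by load_settings_from_file() (the inner blob only):
#   --max_threads 4 --fsync_metadata 1
# A file starting with "--settings" or "--merge-tree-settings" raises ValueError.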


# Unlike functools.cache(), this ignores the arguments entirely
# (args can be non-hashable, e.g. an argparse.Namespace).
def cache_ignore_args(func):
    cached_result = None
    has_result = False

    def wrapper(*args, **kwargs):
        nonlocal cached_result, has_result
        if not has_result:
            cached_result = func(*args, **kwargs)
            has_result = True
        return cached_result

    return wrapper
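# For example, is_old_analyzer_used() below is decorated with cache_ignore_args:
# the server is queried once, and every later call returns the cached boolean
# regardless of the `args` value passed in.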


@cache_ignore_args
def is_old_analyzer_used(args):
    return (
        clickhouse_execute(args, "SELECT getSetting('enable_analyzer')").strip()
        == b"false"
    )


def setup_cgroup_with_memory_limit_cb(cgroup_name: str, memory_limit_bytes: int):
    def preexec_fn():
        if CGROUP_VERSION is None:
            print(f"Cgroups are not available, memory limit will not be applied", file=sys.stderr)
            return

        try:
            # NOTE: will not work locally, since I did not find a way to propagate cgroup.subtree_control into the user unit
            if CGROUP_VERSION == 2:
                CGROUP_ROOT = "/sys/fs/cgroup"
                cgroup_path = os.path.join(CGROUP_ROOT, cgroup_name)
                os.makedirs(cgroup_path, exist_ok=True)

                with open(os.path.join(cgroup_path, "memory.max"), "w") as f:
                    f.write(str(memory_limit_bytes))
                with open(os.path.join(cgroup_path, "cgroup.procs"), "w") as f:
                    f.write(str(os.getpid()))
            else:  # v1
                CGROUP_ROOT = "/sys/fs/cgroup/memory"
                cgroup_path = os.path.join(CGROUP_ROOT, cgroup_name)
                os.makedirs(cgroup_path, exist_ok=True)

                with open(os.path.join(cgroup_path, "memory.limit_in_bytes"), "w") as f:
                    f.write(str(memory_limit_bytes))
                with open(os.path.join(cgroup_path, "tasks"), "w") as f:
                    f.write(str(os.getpid()))
        except Exception as e:
            print(f"Failed to configure cgroup {cgroup_name}: {e}", file=sys.stderr)
            # Exit child safely without running exec
            os._exit(1)
    return preexec_fn
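# Usage sketch (this mirrors run_single_test() further below): the returned
# callback runs in the child between fork() and exec(), placing the child into
# a fresh cgroup with the given memory limit:
#   preexec_fn = setup_cgroup_with_memory_limit_cb(f"clickhouse-test-{os.getpid()}", limit_bytes)
#   proc = Popen(command, shell=True, preexec_fn=preexec_fn)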


def cleanup_cgroup(cgroup_name: str):
    if CGROUP_VERSION is None:
        return

    try:
        if CGROUP_VERSION == 2:
            CGROUP_ROOT = "/sys/fs/cgroup"
            # setup_cgroup_with_memory_limit_cb() creates the cgroup directly
            # under the controller root; also look under the current process's
            # cgroup in case of delegation.
            candidates = [os.path.join(CGROUP_ROOT, cgroup_name)]

            with open("/proc/self/cgroup", "r") as f:
                cgroup_line = f.read().strip()
            current_cgroup = cgroup_line.split("::")[1] if "::" in cgroup_line else ""
            if current_cgroup:
                candidates.append(
                    os.path.join(CGROUP_ROOT, current_cgroup.lstrip("/"), cgroup_name)
                )
        else:  # v1
            CGROUP_ROOT = "/sys/fs/cgroup/memory"
            candidates = [os.path.join(CGROUP_ROOT, cgroup_name)]

        for cgroup_path in candidates:
            if os.path.exists(cgroup_path):
                os.rmdir(cgroup_path)
    except Exception as e:
        print(f"Failed to cleanup cgroup {cgroup_name}: {e}", file=sys.stderr)


class TestResult:
    def __init__(
        self,
        case_name: str,
        status: TestStatus,
        reason: Optional[FailureReason],
        total_time: float,
        description: str,
    ):
        self.case_name: str = case_name
        self.status: TestStatus = status
        self.reason: Optional[FailureReason] = reason
        self.total_time: float = total_time
        self.description: str = description
        self.need_retry: bool = False

    def check_if_need_retry(self, args, stdout, stderr, runs_count):
        if (
            self.status != TestStatus.FAIL
            or not need_retry(args, stdout, stderr, self.total_time)
            or runs_count > MAX_RETRIES
            or args.dont_retry_failures
        ):
            return
        self.need_retry = True


class TestCase:
    @staticmethod
    def get_description_from_exception_info(exc_info):
        exc_type, exc_value, tb = exc_info
        exc_name = exc_type.__name__
        traceback_str = "\n".join(traceback.format_tb(tb, 10))
        description = f"\n{exc_name}\n{exc_value}\n{traceback_str}"
        return description

    @staticmethod
    def get_reference_file(args, suite_dir, name):
        """
        Returns reference file name for specified test
        """
        name = removesuffix(name, ".gen")

        file_extensions = [".reference", ".gen.reference"]
        if is_old_analyzer_used(args):
            file_extensions = [".oldanalyzer.reference", *file_extensions]

        for ext in file_extensions:
            reference_file = os.path.join(suite_dir, name) + ext
            if os.path.isfile(reference_file):
                return reference_file
        return None
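    # Resolution order (illustrative, for a hypothetical test "01234_foo" with
    # the old analyzer enabled): 01234_foo.oldanalyzer.reference, then
    # 01234_foo.reference, then 01234_foo.gen.reference; None if nothing exists.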

    def configure_testcase_args(self, args, case_file, suite_tmp_dir):
        testcase_args = copy.deepcopy(args)

        testcase_args.testcase_start_time = datetime.now()
        testcase_basename = os.path.basename(case_file)

        if args.cloud:
            # Sticky host is used by default
            # For additional coverage, queries can be run on different replicas if the test is designed for this
            if "cloud-no-sticky" in self.tags:
                os.environ["CLICKHOUSE_HOST"] = os.environ["CLICKHOUSE_HOST_NON_STICKY"]
            else:
                os.environ["CLICKHOUSE_HOST"] = os.environ["CLICKHOUSE_HOST_STICKY"]

            tcp_host = os.environ["CLICKHOUSE_HOST"]
            testcase_args.tcp_host = tcp_host
            testcase_args.client = re.sub(
                r"--host=[^\s]*", f"--host={tcp_host}", testcase_args.client, count=1
            )

        testcase_args.testcase_basename = testcase_basename

        if testcase_args.database:
            database = testcase_args.database
            os.environ.setdefault("CLICKHOUSE_DATABASE", database)
            os.environ.setdefault("CLICKHOUSE_TMP", suite_tmp_dir)
            os.environ.setdefault("CLICKHOUSE_WRITE_COVERAGE", f"coverage.{database}")
            testcase_args.test_tmp_dir = suite_tmp_dir
        else:
            # If --database is not specified, we create a temporary database
            # with a unique name and recreate/drop it for each test.
            def random_str(length=8):
                alphabet = string.ascii_lowercase + string.digits
                # NOTE: it is important not to use the default random generator, since it shares state.
                return "".join(
                    random.SystemRandom().choice(alphabet) for _ in range(length)
                )

            database = f"test_{random_str()}"

            clickhouse_execute(
                args,
                "CREATE DATABASE IF NOT EXISTS "
                + database
                + get_db_engine(testcase_args, database),
                settings=get_create_database_settings(args, testcase_args),
            )

            os.environ["CLICKHOUSE_DATABASE"] = database
            # Set temporary directory to match the randomly generated database,
            # because .sh tests also use it for temporary files and we want to avoid
            # collisions.
            testcase_args.test_tmp_dir = os.path.join(suite_tmp_dir, database)
            os.mkdir(testcase_args.test_tmp_dir)
            os.environ["CLICKHOUSE_TMP"] = testcase_args.test_tmp_dir
            os.environ["CLICKHOUSE_WRITE_COVERAGE"] = f"coverage.{database}"

        testcase_args.testcase_database = database

        # Printed only in case of failures.
        #
        # NOTE: here we use "CLICKHOUSE_TMP" instead of "file_suffix",
        # so it is set in configure_testcase_args(), unlike the other files
        # (stdout_file, stderr_file) that are set in TestCase::__init__(),
        # since CLICKHOUSE_TMP is easier to use from expect scripts.
        testcase_args.debug_log_file = (
            os.path.join(testcase_args.test_tmp_dir, testcase_basename) + ".debuglog"
        )

        testcase_args.testcase_client = (
            f"{testcase_args.client} --log_comment '{testcase_args.testcase_basename}-{testcase_args.testcase_database}'"
        )

        return testcase_args

    @staticmethod
    def cli_format_settings(settings_list) -> str:
        out = []
        for k, v in settings_list.items():
            out.extend([f"--{k}", str(v)])
        return " ".join(out)

    @staticmethod
    def http_format_settings(settings_list) -> str:
        return urllib.parse.urlencode(settings_list)
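    # Illustrative: for {"max_threads": 1, "fsync_metadata": 1} these produce
    #   cli_format_settings()  -> "--max_threads 1 --fsync_metadata 1"
    #   http_format_settings() -> "max_threads=1&fsync_metadata=1"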

    def has_show_create_table_in_test(self):
        return not subprocess.call(["grep", "-iq", "show create", self.case_file])

    def add_effective_settings(self, client_options):
        new_options = ""
        if self.effective_settings:
            http_params = self.http_format_settings(self.effective_settings)
            if len(self.base_url_params) == 0:
                os.environ["CLICKHOUSE_URL_PARAMS"] = http_params
            else:
                os.environ["CLICKHOUSE_URL_PARAMS"] = (
                    self.base_url_params + "&" + http_params
                )

            new_options += f" {self.cli_format_settings(self.effective_settings)}"

        if self.effective_merge_tree_settings:
            new_options += f" --allow_merge_tree_settings {self.cli_format_settings(self.effective_merge_tree_settings)}"

        if new_options != "":
            new_options += " --allow_repeated_settings"
            os.environ["CLICKHOUSE_CLIENT_OPT"] = (
                self.base_client_options + new_options + " "
            )
        elif client_options:
            client_options += " --allow_repeated_settings"
            os.environ["CLICKHOUSE_CLIENT_OPT"] = (
                self.base_client_options + client_options + " "
            )

        current_value = os.environ.get("CLICKHOUSE_CLIENT_OPT", "")
        if "--allow_repeated_settings" not in current_value:
            os.environ["CLICKHOUSE_CLIENT_OPT"] = (
                f"{current_value} --allow_repeated_settings".strip()
            )

        return client_options + new_options

    def remove_settings_from_env(self):
        os.environ["CLICKHOUSE_URL_PARAMS"] = self.base_url_params
        os.environ["CLICKHOUSE_CLIENT_OPT"] = self.base_client_options

    def add_info_about_settings(self, description):
        if self.effective_settings:
            description += f"\nSettings used in the test: {self.cli_format_settings(self.effective_settings)}"
        if self.effective_merge_tree_settings:
            description += f"\n\nMergeTree settings used in test: {self.cli_format_settings(self.effective_merge_tree_settings)}"

        return description + "\n"

    def apply_random_settings_limits(self, random_settings):
        for setting in random_settings:
            if setting in self.random_settings_limits:
                min_value = self.random_settings_limits[setting][0]
                # min_value can be 0, so compare with None explicitly
                if min_value is not None and random_settings[setting] < min_value:
                    random_settings[setting] = min_value
                max_value = self.random_settings_limits[setting][1]
                # max_value can be 0, so compare with None explicitly
                if max_value is not None and random_settings[setting] > max_value:
                    random_settings[setting] = max_value
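    # Illustrative: with random_settings_limits == {"index_granularity": (128, 4096)},
    # a randomized value of 17 is clamped to 128 and 1_000_000 to 4096; a limit
    # of None on either side leaves the value untouched.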

    def __init__(self, suite: "TestSuite", case: str, args, is_concurrent: bool):
        self.suite = suite
        self.case: str = case  # case file name
        self.args: Namespace = args
        self.tags: Set[str] = suite.all_tags[case]
        self.random_settings_limits = suite.all_random_settings_limits.get(case, {})
        self.memory_limit = suite.all_memory_limits.get(case) or args.memory_limit

        for tag in os.getenv("GLOBAL_TAGS", "").split(","):
            tag = tag.strip()
            if tag:
                self.tags.add(tag)

        self.case_file: str = os.path.join(suite.suite_path, case)
        (self.name, self.ext) = os.path.splitext(case)

        file_suffix = f".{os.getpid()}" if is_concurrent and args.test_runs > 1 else ""
        self.reference_file = self.get_reference_file(args, suite.suite_path, self.name)
        self.stdout_file = (
            os.path.join(suite.suite_tmp_path, self.name) + file_suffix + ".stdout"
        )
        self.stderr_file = (
            os.path.join(suite.suite_tmp_path, self.name) + file_suffix + ".stderr"
        )

        self.testcase_args = None
        self.runs_count = 0

        has_no_random_settings_tag = self.tags and "no-random-settings" in self.tags
        has_no_random_merge_tree_settings_tag = (
            self.tags and "no-random-merge-tree-settings" in self.tags
        )

        allow_random_settings = not (args.no_random_settings or has_no_random_settings_tag)
        # If the test contains SHOW CREATE TABLE, do not randomize
        # MergeTree settings: they would be added to the table definition
        # and the test would fail.
        allow_random_mt_settings = not (
            args.no_random_merge_tree_settings
            or has_no_random_settings_tag
            or has_no_random_merge_tree_settings_tag
            or self.has_show_create_table_in_test()
        )

        # Base randomized dicts (or empty if not allowed)
        base_settings = {}
        if allow_random_settings:
            base_settings = SettingsRandomizer.get_random_settings(args, self.tags)
            self.apply_random_settings_limits(base_settings)

        base_mt_settings = {}
        if allow_random_mt_settings:
            base_mt_settings = MergeTreeSettingsRandomizer.get_random_settings(args)
            self.apply_random_settings_limits(base_mt_settings)

        # User-specified fixed dicts
        fixed_settings = dict(args.fixed_settings or {})
        fixed_mt_settings = dict(args.fixed_merge_tree_settings or {})

        # - If user passed fixed settings, use them exclusively.
        # - Otherwise, use randomized (if allowed), else empty.
        self.effective_settings = fixed_settings if fixed_settings else base_settings
        self.effective_merge_tree_settings = fixed_mt_settings if fixed_mt_settings else base_mt_settings

        self.base_url_params = os.environ.get("CLICKHOUSE_URL_PARAMS", "")

        self.base_client_options = os.environ.get("CLICKHOUSE_CLIENT_OPT", "")

        self.show_whitespaces_in_diff = args.show_whitespaces_in_diff

    # Returns the skip reason if the test should be skipped, otherwise None.
    def should_skip_test(self, suite) -> Optional[FailureReason]:
        tags = self.tags
        args = self.args
        if tags and ("disabled" in tags) and not args.disabled:
            return FailureReason.DISABLED

        if self.name in suite.skip_list:
            return FailureReason.NOT_SUPPORTED

        if args.private and self.name in suite.private_skip_list:
            return FailureReason.NOT_SUPPORTED_IN_PRIVATE

        if args.cloud:
            if "no-replicated-database" in tags:
                return FailureReason.REPLICATED_DB

            if "no-shared-merge-tree" in tags:
                return FailureReason.SHARED_MERGE_TREE

            if "zookeeper" in tags:
                return FailureReason.NO_ZOOKEEPER

            if "replica" in tags:
                return FailureReason.REPLICATED_DB

            if "use-hdfs" in tags:
                return FailureReason.NOT_SUPPORTED_IN_CLOUD

            if "test-type-expect" in tags:
                return FailureReason.NOT_SUPPORTED_IN_CLOUD

            if "no-object-storage" in tags:
                return FailureReason.OBJECT_STORAGE

            if self.name in suite.cloud_skip_list:
                return FailureReason.NOT_SUPPORTED_IN_CLOUD

        if not args.cloud and "cloud" in tags:
            return FailureReason.SKIP

        if (
            os.path.exists(os.path.join(suite.suite_path, self.name) + ".disabled")
            and not args.disabled
        ):
            return FailureReason.DISABLED

        if "no-parallel-replicas" in tags and args.no_parallel_replicas:
            return FailureReason.NO_PARALLEL_REPLICAS

        if "no-async-insert" in tags and args.no_async_insert:
            return FailureReason.NO_ASYNC_INSERT

        # Do not run *Log and Memory engine-specific tests
        if (
            "memory-engine" in tags or "log-engine" in tags
        ) and args.replace_log_memory_with_mergetree:
            return FailureReason.NOT_SUPPORTED

        if args.skip and any(s in self.name for s in args.skip):
            return FailureReason.SKIP

        if not USE_JINJA and self.ext.endswith("j2"):
            return FailureReason.NO_JINJA

        if (
            tags
            and (("zookeeper" in tags) or ("replica" in tags))
            and not args.zookeeper
        ):
            return FailureReason.NO_ZOOKEEPER
        
        if (
            tags
            and (("shard" in tags) or ("distributed" in tags) or ("global" in tags))
            and not args.shard
        ):
            return FailureReason.NO_SHARD

        if tags and ("no-fasttest" in tags) and args.fast_tests_only:
            return FailureReason.FAST_ONLY

        if tags and ("fasttest-only" in tags) and not args.fast_tests_only:
            return FailureReason.NOT_FAST_ONLY

        if (
            tags
            and (("long" in tags) or ("deadlock" in tags) or ("race" in tags))
            and args.no_long
        ):
            # Tests for races and deadlocks are usually run in a loop for a significant amount of time
            return FailureReason.NO_LONG
        
        if (
            tags
            and ("no-llvm-coverage" in tags)
            and args.llvm_coverage
        ):
            # Skip tests that are incompatible with LLVM coverage
            return FailureReason.NO_LLVM_COVERAGE
        
        if tags and ("no-replicated-database" in tags) and args.replicated_database:
            return FailureReason.REPLICATED_DB

        # RocksDB tests cannot be used with replicated database as, in our CI, the databases share the same user_files
        if tags and ("use-rocksdb" in tags) and args.replicated_database:
            return FailureReason.REPLICATED_DB

        if tags and ("no-distributed-cache" in tags) and args.distributed_cache:
            return FailureReason.DISTRIBUTED_CACHE

        if (
            tags
            and ("atomic-database" in tags)
            and (
                args.replicated_database
                or args.shared_catalog
                or args.db_engine not in (None, "Atomic")
            )
        ):
            return FailureReason.NON_ATOMIC_DB

        if (
            tags
            and ("no-shared-merge-tree" in tags)
            and args.replace_replicated_with_shared
        ):
            return FailureReason.SHARED_MERGE_TREE

        if (
            tags
            and ("shared-catalog" in tags)
            and not (args.shared_catalog or args.shared_catalog_stress)
        ):
            return FailureReason.SHARED_CATALOG_ONLY

        if (
            args.shared_catalog
            and not args.cloud
            and not args.shared_catalog_stress
            and not SharedEngineReplacer.has_replicated_or_shared_or_cloud(
                self.case_file
            )
        ):
            return FailureReason.NO_SHARED_OR_REPLICATED_MERGE_TREE

        if args.shared_catalog and not args.shared_catalog_stress and not args.cloud:
            # shared catalog specific tests
            if tags and ("shared-catalog" in tags):
                return None

            if tags and ("no-shared-catalog" in tags):
                return FailureReason.SHARED_CATALOG
            if tags and ("no-replicated-database" in tags):
                return FailureReason.SHARED_CATALOG
            if SharedEngineReplacer.has_non_replicated(self.case_file):
                return FailureReason.SHARED_CATALOG

        if tags and ("no-s3-storage" in tags) and args.s3_storage:
            return FailureReason.S3_STORAGE
        if tags and ("no-azure-blob-storage" in tags) and args.azure_blob_storage:
            return FailureReason.AZURE_BLOB_STORAGE
        if (
            tags
            and ("no-object-storage" in tags)
            and (args.azure_blob_storage or args.s3_storage)
        ):
            return FailureReason.OBJECT_STORAGE
        if (
            tags
            and "no-object-storage-with-slow-build" in tags
            and (args.s3_storage or args.azure_blob_storage)
            and BuildFlags.RELEASE not in args.build_flags
        ):
            return FailureReason.OBJECT_STORAGE
        if tags and ("no-coverage" in tags) and (args.collect_per_test_coverage):
            return FailureReason.COVERAGE

        if "no-batch" in tags and (
            args.run_by_hash_num is not None or args.run_by_hash_total is not None
        ):
            return FailureReason.SKIP

        if "no-flaky-check" in tags and args.flaky_check:
            # Skip for flaky and targeted checks. The main reason for this tag:
            # the test is not compatible with the thread fuzzer used in both jobs.
            return FailureReason.SKIP
        if "stateful" in tags and args.no_stateful:
            return FailureReason.NO_STATEFUL
        if "stateful" not in tags and args.no_stateless:
            return FailureReason.NO_STATELESS
        if tags and ("no-openssl-fips" in tags) and args.openssl_fips:
            return FailureReason.NO_OPENSSL_FIPS

        if tags and ("no-encrypted-storage" in tags) and args.encrypted_storage:
            return FailureReason.ENCRYPTED_STORAGE

        # Ignore no-{build} tags in stress-tests job
        if tags and not args.stress_tests:
            for build_flag in args.build_flags:
                if "no-" + build_flag in tags:
                    return FailureReason.BUILD
            if "no-sanitizers" in tags:
                if any(
                    flag in args.build_flags
                    for flag in BuildFlags.SANITIZERS
                ):
                    return FailureReason.BUILD

            for tag in tags:
                tag = tag.replace("-", "_")
                if tag.startswith("use_") and tag not in args.build_flags:
                    return FailureReason.BUILD

        return None

    def should_skip_cloud_test(self) -> Tuple[Optional[FailureReason], str]:
        # Try to find restricted keywords in test case file.
        # These features are not supported in the cloud env.
        # Simple regexp grep
        restricted_functionalities = [
            # These table/database engines are not supported in the cloud,
            # and it's not possible to rewrite them automatically
            r"ENGINE\s*=?\s*(Distributed|File|EmbeddedRocksDB|Lazy|MySQL|Dictionary)",
            # CREATE DATABASE with an explicit ENGINE clause is restricted;
            # Replicated is the default and the only supported engine.
            # r"CREATE DATABASE.*ENGINE",
            r"CREATE DATABASE.*ENGINE\s*=?\s*(Memory)",
            # Remote localhost nodes are not available
            r"remote\('127",
            r"remoteSecure\('127",
            r"url\('http://",
            r"localhost:8123",
            # All engines are rewritten to SMT and this old syntax isn't supported in SMT
            r" MergeTree\(.*[0-9]+\)",
            # Transactions are not supported for SMT
            r"BEGIN TRANSACTION",
            # Temporary SMT is not supported:
            # Temporary tables cannot be created with Replicated, Shared or KeeperMap table engines. (INCORRECT_QUERY)
            r"CREATE TEMPORARY",
            # Dictionaries from configs
            r"(flat_ints|hashed_ints|hashed_sparse_ints|cache_ints|complex_hashed_ints|"
            r"complex_cache_ints|one_cell_cache_ints|one_cell_cache_ints_overflow)",
            # Only tables with a Replicated engine...
            r"CREATE\s*WINDOW\s*VIEW",
            # Backups
            r"BACKUP\s*TABLE",
            r"BACKUP\S*DATABASE",
            # Local files are not present
            r"FROM\s*file\(",
            r"FROM\s*fileCluster\(",
            r"FROM\s*mysql\(",
            r"FROM\s*s3\(",
            r" file\(",
            # cloud tests connect to remote nodes
            r"CLICKHOUSE_LOCAL",
            r"CLICKHOUSE_KEEPER_CLIENT",
            r"CLICKHOUSE_CLIENT_BINARY",
            r"CLICKHOUSE_BENCHMARK",
            r"\${?CLICKHOUSE_COMPRESSOR",
            r"CLICKHOUSE_FORMAT",
            "clickhouse-format",
            # This works incorrectly
            r"CLICKHOUSE_URL}&.*ENGINE",
            # MySQL instance does not exist
            r"MYSQL_CLIENT",
            # No postgres
            "psql",
            "postgres",
            "ENGINE ?= ?SQLite",
            "object_storage_type ?= ?azure_blob_storage",
            "clickhouse-disks",
            r"disk\(name",
            r"FROM Disk\('backups",
            "SETTINGS disk",
            "FROM infile",
            r"log_queries_min_type\s*=",
            r"log_queries=",
            # named collections:
            "url_with_headers",
            "s3_conn",
            "s3_conn_db",
            "cache_collection",
            "url_override1",
            "url_override2",
            "remote1",
            "remote2",
            # No local files
            "RESTORE DATABASE ",
            # Doesn't work with replicated
            "non_replicated_deduplication_window",
            # TODO: support python tests
            "from pure_http_client",
            "DB 'test_db'",
            # SMT should have `shared_merge_tree_disable_merges_and_mutations_assignment=1` to
            # fully stop merges, but enabling this setting for all tests will break everything
            "SYSTEM STOP MERGES",
            "SYSTEM START MERGES",
            # https://github.com/ClickHouse/clickhouse-private/issues/10955
            "ATTACH DATABASE",
            r"usr/bin/expect",
            "SYSTEM ENABLE FAILPOINT",
            "SYSTEM FLUSH ASYNC INSERT QUEUE",
            "python3 "
        ]

        with open(self.case_file, "r", encoding="utf-8") as f:
            try:
                test_content = f.read()
            except UnicodeDecodeError:
                # Some test files are not valid UTF-8;
                # skip the restriction check for them.
                return None, ""

        for line in test_content.splitlines():
            for pattern in restricted_functionalities:
                if re.match(f".*{pattern}.*", line, flags=re.IGNORECASE):
                    return (
                        FailureReason.NOT_SUPPORTED_IN_CLOUD_PREFILTER,
                        f"Restriction on '{pattern}' in line '{line}'",
                    )

        return None, ""

    def should_skip_stress_test(self) -> Tuple[Optional[FailureReason], str]:
        restricted_functionalities = [
            # Temporarily disable the Set and Join engines
            r"ENGINE\s*=?\s*(Set|Join)",
        ]

        with open(self.case_file, "r", encoding="utf-8") as f:
            try:
                test_content = f.read()
            except UnicodeDecodeError:
                # Some test files are not valid UTF-8;
                # skip the restriction check for them.
                return None, ""

        for line in test_content.splitlines():
            for pattern in restricted_functionalities:
                if re.match(f".*{pattern}.*", line, flags=re.IGNORECASE):
                    return (
                        FailureReason.NOT_SUPPORTED_IN_CLOUD_PREFILTER,
                        f"Restriction on '{pattern}' in line '{line}'",
                    )

        return None, ""

    def process_result_impl(self, proc, total_time: float) -> TestResult:
        args = self.args
        kill_output = ""
        if proc:
            if proc.returncode is None:
                f = io.StringIO()
                with redirect_stdout(f):
                    kill_process_group(
                        os.getpgid(proc.pid), self.fatal_sanitizer_prefix
                    )
                kill_output = f.getvalue()

        description = ""

        debug_log = ""
        if os.path.exists(self.testcase_args.debug_log_file):
            with open(self.testcase_args.debug_log_file, "rb") as stream:
                debug_log += self.testcase_args.debug_log_file + ":\n"
                debug_log += str(stream.read(), errors="replace", encoding="utf-8")
                debug_log += "\n"

        stdout = ""
        if os.path.exists(self.stdout_file):
            with open(self.stdout_file, "rb") as stdfd:
                stdout = str(stdfd.read(), errors="replace", encoding="utf-8")

        stderr = kill_output
        for path in glob.glob(self.fatal_sanitizer_prefix + "*"):
            with open(path, "rb") as stream:
                content = str(stream.read(), errors="replace", encoding="utf-8")

                def ignore_sanitizer_line(line):
                    return any(
                        exception in line
                        for exception in IGNORED_SANITIZER_ERRORS
                    )

                content = "\n".join(
                    filter(
                        lambda line: not ignore_sanitizer_line(line),
                        content.splitlines(),
                    )
                ).strip()
                if content:
                    stderr += f"Path: {path}\n"
                    stderr += content
                    stderr += "\n"
        if os.path.exists(self.stderr_file):
            with open(self.stderr_file, "rb") as stdfd:
                stderr += str(stdfd.read(), errors="replace", encoding="utf-8")

        if debug_log:
            debug_log = trim_for_log(debug_log, 100)

        if proc:
            if proc.returncode is None:
                if stderr:
                    description += stderr
                if debug_log:
                    description += "\n"
                    description += debug_log
                return TestResult(
                    self.name,
                    TestStatus.FAIL,
                    FailureReason.TIMEOUT,
                    total_time,
                    description,
                )

            if proc.returncode != 0:
                reason = FailureReason.EXIT_CODE
                description += str(proc.returncode)

                if stderr:
                    description += "\n"
                    description += stderr
                if debug_log:
                    description += "\n"
                    description += debug_log

                # Stop on fatal errors like segmentation fault. They are sent to client via logs.
                if " <Fatal> " in stderr:
                    reason = FailureReason.SERVER_DIED

                if (
                    self.testcase_args.stop
                    and (
                        "Connection refused" in stderr
                        or "Attempt to read after eof" in stderr
                    )
                    and "Received exception from server" not in stderr
                    and not check_server_liveness(self.args)
                ):
                    reason = FailureReason.SERVER_DIED

                if os.path.isfile(self.stdout_file):
                    description += ", result:\n\n"
                    with open(self.stdout_file, "rb") as f:
                        description += trim_for_log(
                            f.read().decode("utf-8", errors="ignore")
                        )
                    description += "\n"

                description += f"\nstdout:\n{stdout}\n"
                return TestResult(
                    self.name, TestStatus.FAIL, reason, total_time, description
                )

        if stderr:
            description += "\n"
            description += trim_for_log(stderr)
            description += "\n"
            description += "\nstdout:\n"
            description += trim_for_log(stdout)
            description += "\n"

            if debug_log:
                description += "\n"
                description += debug_log
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.STDERR,
                total_time,
                description,
            )

        # "X-ClickHouse-Exception-Code" included in "Access-Control-Expose-Headers"
        # "Exception" may be included into i.e. metric name
        if (
            re.search(r"(^|\W)(Net|Errno|)Exception(\W|$)", stdout)
            and "X-ClickHouse-Exception-Code" not in stdout
            and "X-ClickHouse-Exception-Tag" not in stdout
        ):
            description += "\n{}\n".format(trim_for_log(stdout, 100))
            if debug_log:
                description += "\n"
                description += debug_log
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.EXCEPTION,
                total_time,
                description,
            )

        if "@@SKIP@@" in stdout:
            skip_reason = stdout.replace("@@SKIP@@", "").rstrip("\n")
            description += " - "
            description += skip_reason
            return TestResult(
                self.name,
                TestStatus.SKIPPED,
                FailureReason.SKIP,
                total_time,
                description,
            )

        if self.reference_file is None:
            # If --record is enabled, create a new reference file
            if self.testcase_args.record:
                # Use the same logic as get_reference_file to determine the correct path
                name = removesuffix(self.name, ".gen")
                # Determine the appropriate extension based on analyzer mode
                if is_old_analyzer_used(self.testcase_args):
                    ext = ".oldanalyzer.reference"
                else:
                    ext = ".reference"
                self.reference_file = os.path.join(self.suite.suite_path, name) + ext
                shutil.copy2(self.stdout_file, self.reference_file)
                description += "\tReference file created\n"
                return TestResult(
                    self.name,
                    TestStatus.OK,
                    None,
                    total_time,
                    description,
                )
            return TestResult(
                self.name,
                TestStatus.UNKNOWN,
                FailureReason.NO_REFERENCE,
                total_time,
                description,
            )

        result_is_different = subprocess.call(
            ["diff", "-q", self.reference_file, self.stdout_file], stdout=PIPE
        )

        if result_is_different:
            # If --record is enabled, copy stdout to reference file
            if self.testcase_args.record:
                shutil.copy2(self.stdout_file, self.reference_file)
                description += "\tReference file updated\n"
                return TestResult(
                    self.name,
                    TestStatus.OK,
                    None,
                    total_time,
                    description,
                )

            with Popen(
                [
                    "diff",
                    "-U",
                    str(self.testcase_args.unified),
                    self.reference_file,
                    self.stdout_file,
                ],
                stdout=PIPE,
            ) as diff_proc:
                if self.show_whitespaces_in_diff:
                    with Popen(
                        ["sed", "-e", "s/[ \t]\\+$/&$/g"],
                        stdin=diff_proc.stdout,
                        stdout=PIPE,
                    ) as sed_proc:
                        diff = sed_proc.communicate()[0]
                else:
                    diff = diff_proc.communicate()[0]
            diff = diff.decode("utf-8", errors="ignore")

            if diff.startswith("Binary files "):
                diff += "Content of stdout:\n===================\n"
                with open(self.stdout_file, "rb") as file:
                    diff += str(file.read())
                diff += "==================="
            description += f"\n{diff}\n"
            if debug_log:
                description += "\n"
                description += debug_log

            # Skip the test if these lines appear in the result:
            # we rewrite all possible tables to SMT,
            # so the expected result is no longer correct.
            if args.cloud and (
                "+CREATE TABLE" in description
                or "+CREATE TEMPORARY TABLE" in description
                or "+CREATE MATERIALIZED" in description
                # Rewritten to SMT/Replicated tables have a different part naming scheme,
                # starting from all_0_0_0 instead of all_1_1_0
                or "+all_0_0_0" in description
            ):
                return TestResult(
                    self.name,
                    TestStatus.SKIPPED,
                    FailureReason.RESULT_DIFF,
                    total_time,
                    description,
                )

            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.RESULT_DIFF,
                total_time,
                description,
            )

        if (
            self.testcase_args.test_runs > 1
            and self.testcase_args.flaky_check
            and total_time > TEST_MAX_RUN_TIME_IN_SECONDS
            and "long" not in self.tags
        ):
            if debug_log:
                description += "\n"
                description += debug_log
            # We're in Flaky Check mode, check the run time as well while we're at it.
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.TOO_LONG,
                total_time,
                description,
            )

        if os.path.exists(self.stdout_file):
            os.remove(self.stdout_file)
        if os.path.exists(self.stderr_file):
            os.remove(self.stderr_file)
        for path in glob.glob(self.fatal_sanitizer_prefix + "*"):
            os.remove(path)
        if os.path.exists(self.testcase_args.debug_log_file):
            os.remove(self.testcase_args.debug_log_file)

        return TestResult(self.name, TestStatus.OK, None, total_time, description)

    def print_test_time(self, test_time) -> str:
        return f" {test_time:.2f} sec."

    def process_result(self, result: TestResult, messages):
        args = self.args
        description_full = messages[result.status]
        description_full += self.print_test_time(result.total_time)
        if result.reason is not None:
            description_full += f"\nReason: {result.reason.value} "

        description_full += result.description

        if (
            args.collect_per_test_coverage
            and BuildFlags.SANITIZE_COVERAGE in args.build_flags
        ):
            try:
                clickhouse_execute(
                    args,
                    f"INSERT INTO system.coverage_log "
                    f"WITH arrayDistinct(arrayFilter(x -> x != 0, coverageCurrent())) AS coverage_distinct "
                    f"SELECT DISTINCT now(), '{self.case}', coverage_distinct, "
                    f"arrayMap(x -> demangle(addressToSymbol(x)), coverage_distinct) ",
                    retry_error_codes=True,
                )
            except Exception as e:
                print("Cannot insert coverage data: ", str(e))
                traceback.print_exc()
                sys.exit(1)

            # Check for dumped coverage files
            if self.testcase_args and self.testcase_args.testcase_database:
                file_pattern = f"coverage.{self.testcase_args.testcase_database}.*"
                matching_files = glob.glob(file_pattern)
                for file_path in matching_files:
                    try:
                        body = read_file_as_binary_string(file_path)
                        clickhouse_execute(
                            args,
                            "INSERT INTO system.coverage_log "
                            "SETTINGS async_insert=1, wait_for_async_insert=0, async_insert_busy_timeout_min_ms=200, async_insert_busy_timeout_max_ms=1000 "
                            f"WITH arrayDistinct(groupArray(data)) AS coverage_distinct "
                            f"SELECT now(), '{self.case}', coverage_distinct, "
                            f"arrayMap(x -> demangle(addressToSymbol(x)), coverage_distinct) "
                            f"FROM input('data UInt64') FORMAT RowBinary",
                            body=body,
                            retry_error_codes=True,
                        )
                    except Exception as e:
                        print("Cannot insert coverage data: ", str(e))
                        traceback.print_exc()
                        sys.exit(1)

                    # Remove the file even in case of exception to avoid accumulation and quadratic complexity.
                    try:
                        os.remove(file_path)
                    except Exception as e:
                        print("FIXME: Race! Cannot remove coverage file: ", str(e))
            else:
                # self.testcase_args not configured - test was skipped
                pass

            coverage = clickhouse_execute(
                args,
                "SELECT length(coverageCurrent())",
                retry_error_codes=True,
            ).decode()

            description_full += f" (coverage: {coverage})"

        description_full += "\n"

        if result.status == TestStatus.FAIL and self.testcase_args:
            description_full += "Database: " + self.testcase_args.testcase_database

        result.description = description_full
        return result

    def send_test_name_failed(self, suite: str, case: str):
        pid = os.getpid()
        clickhouse_execute(
            self.args,
            f"SELECT 'Running test {suite}/{case} from pid={pid}'",
            retry_error_codes=True,
        )

    def run_single_test(
        self, server_logs_level, client_options
    ) -> Tuple[Optional[Popen], float]:
        args = self.testcase_args
        client = args.testcase_client
        start_time = args.testcase_start_time
        database = args.testcase_database

        # NOTE:
        # - client writes to fatal_sanitizer_prefix
        # - sanitizers write to fatal_sanitizer_prefix.PID
        self.fatal_sanitizer_prefix = f"{self.stderr_file}-fatal"
        # Ensure that we start with empty files
        for path in glob.glob(self.fatal_sanitizer_prefix + "*"):
            os.remove(path)

        log_opt = " --client_logs_file=" + self.fatal_sanitizer_prefix + " "
        client_options += log_opt

        for env_name in [
            "TSAN_OPTIONS",
            "ASAN_OPTIONS",
            "MSAN_OPTIONS",
            "UBSAN_OPTIONS",
        ]:
            current_options = os.environ.get(env_name, None)
            if current_options is None:
                os.environ[env_name] = f"log_path={self.fatal_sanitizer_prefix}"
            elif "log_path=" not in current_options:
                os.environ[env_name] += f":log_path={self.fatal_sanitizer_prefix}"

        os.environ["CLICKHOUSE_CLIENT_OPT"] = (
            os.environ["CLICKHOUSE_CLIENT_OPT"]
            if "CLICKHOUSE_CLIENT_OPT" in os.environ
            else ""
        ) + log_opt

        # This is for .sh tests
        os.environ["CLICKHOUSE_LOG_COMMENT"] = f'{args.testcase_basename}-{args.testcase_database}'

        query_params = ""
        if "need-query-parameters" in self.tags:
            query_params = (
                " --param_CLICKHOUSE_DATABASE="
                + database
                + " --param_CLICKHOUSE_DATABASE_1="
                + database
                + "_1"
            )
            for env_to_param in [
                "CLICKHOUSE_USER_FILES",
                "CLICKHOUSE_USER_FILES_UNIQUE",
            ]:
                value = os.environ.get(env_to_param, None)
                if value:
                    query_params += f" --param_{env_to_param}={value}"

        params = {
            "client": client + " --database=" + database + query_params,
            "logs_level": server_logs_level,
            "options": client_options,
            "test": self.case_file,
            "stdout": self.stdout_file,
            "stderr": self.stderr_file,
            "secure": (
                "--secure" if args.secure and "--secure" not in client_options else ""
            ),
        }

        # Redirect stdout and stderr of the test to files; stderr also receives
        # the output of per-test database creation.
        pattern = "{test} > {stdout} 2> {stderr}"

        tests_env = os.environ.copy()
        if args.trace:
            tests_env["CLICKHOUSE_BASH_TRACING_FILE"] = args.debug_log_file

        if self.ext == ".sql":
            pattern = (
                "{client} --send_logs_level={logs_level} {secure} --multiquery {options} < "
                + pattern
            )

        command = pattern.format(**params)

        # We want to calculate per-test code coverage. That's why we reset it before each test.
        if (
            args.collect_per_test_coverage
            and args.reset_coverage_before_every_test
            and BuildFlags.SANITIZE_COVERAGE in args.build_flags
        ):
            clickhouse_execute(
                args,
                "SYSTEM RESET COVERAGE",
                retry_error_codes=True,
            )

        preexec_fn = None
        cgroup_name = None
        if args.memory_limit:
            cgroup_name = f"clickhouse-test-{os.getpid()}"
            preexec_fn = setup_cgroup_with_memory_limit_cb(cgroup_name, self.memory_limit)

        # pylint:disable-next=consider-using-with; TODO: fix
        proc = Popen(command, shell=True, env=tests_env, start_new_session=True, preexec_fn=preexec_fn)

        try:
            proc.wait(args.timeout)
        except subprocess.TimeoutExpired:
            # Whether the test timed out will be decided later
            pass
        finally:
            if cgroup_name:
                cleanup_cgroup(cgroup_name)

        total_time = (datetime.now() - start_time).total_seconds()

        # Normalize the randomized database name in the stdout file
        # (and in stderr when args.hide_db_name is set).
        replace_in_file(self.stdout_file, database, "default")
        if args.hide_db_name:
            replace_in_file(self.stderr_file, database, "default")
        if args.replicated_database:
            replace_in_file(self.stdout_file, "/auto_{shard}", "")
            replace_in_file(self.stdout_file, "auto_{replica}", "")
        if args.shared_catalog:
            replace_in_file(self.stdout_file, "auto_{replica}", "")
            replace_in_file(self.stdout_file, "auto_r1", "")

        # Normalize hostname in stdout file.
        replace_in_file(self.stdout_file, socket.gethostname(), "localhost")

        if os.environ.get("CLICKHOUSE_PORT_TCP"):
            replace_in_file(
                self.stdout_file,
                f"PORT {os.environ['CLICKHOUSE_PORT_TCP']}",
                "PORT 9000",
            )
            replace_in_file(
                self.stdout_file,
                f"localhost	{os.environ['CLICKHOUSE_PORT_TCP']}",
                "localhost	9000",
            )

        if os.environ.get("CLICKHOUSE_PORT_TCP_SECURE"):
            replace_in_file(
                self.stdout_file,
                f"PORT {os.environ['CLICKHOUSE_PORT_TCP_SECURE']}",
                "PORT 9440",
            )
            replace_in_file(
                self.stdout_file,
                f"localhost	{os.environ['CLICKHOUSE_PORT_TCP_SECURE']}",
                "localhost	9440",
            )

        if os.environ.get("CLICKHOUSE_PATH"):
            replace_in_file(
                self.stdout_file,
                os.environ["CLICKHOUSE_PATH"],
                "/var/lib/clickhouse",
            )

        if os.environ.get("CLICKHOUSE_PORT_HTTPS"):
            replace_in_file(
                self.stdout_file,
                f"https://localhost:{os.environ['CLICKHOUSE_PORT_HTTPS']}/",
                "https://localhost:8443/",
            )

        if args.cloud:
            replace_in_file(
                self.stdout_file,
                r"ENGINE = SharedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')",
                "ENGINE = SharedMergeTree",
            )
            replace_in_file(
                self.stdout_file,
                r"ENGINE = SharedMergeTree(\'/clickhouse/tables/{uuid}/{shard}\', \'{replica}\')",
                "ENGINE = SharedMergeTree",
            )

            replace_in_file(
                self.stdout_file,
                r"ENGINE = SharedMergeTree('/clickhouse/tables/{uuid}/{shard}', '{replica}')".replace(
                    "'", r"'\''"
                ),
                "ENGINE = SharedMergeTree",
            )
            replace_in_file(
                self.stdout_file,
                r"ENGINE = SharedMergeTree(\\'/clickhouse/tables/{uuid}/{shard}\\', \\'{replica}\\')".replace(
                    "'", r"'\''"
                ),
                "ENGINE = SharedMergeTree",
            )

            replace_in_file_re(
                self.stdout_file,
                r"\(ENGINE = Shared.*MergeTree\)(\([^,.]*\), \([^,.]*\), \([^,.]*\), \([^,.]*\))",
                r"\1(\4, \5)",
            )
            replace_in_file_re(
                self.stdout_file,
                r"\(ENGINE = Shared.*MergeTree\)(\([^,.]*\), \([^,.]*\), \([^,.]*\))",
                r"\1(\4)",
            )

        return proc, total_time

    def run(self, args, suite, client_options):
        start_time = datetime.now()

        try:
            description = ""
            skip_reason = self.should_skip_test(suite)
            if args.cloud and skip_reason is None:
                skip_reason, description = self.should_skip_cloud_test()

            # if args.shared_catalog_stress and skip_reason is None:
            #     skip_reason, description = self.should_skip_stress_test()

            if skip_reason is not None:
                return TestResult(
                    self.name, TestStatus.SKIPPED, skip_reason, 0.0, description
                )

            if args.testname:
                try:
                    self.send_test_name_failed(suite.suite, self.case)
                except Exception:
                    # If health-check failed, double-check server liveness. A single
                    # check may fail because of memory exhaustion, for example.
                    if not check_server_liveness(self.args):
                        return TestResult(
                            self.name,
                            TestStatus.FAIL,
                            FailureReason.SERVER_DIED,
                            0.0,
                            "\nServer does not respond to health check\n",
                        )

            self.runs_count += 1
            self.testcase_args = self.configure_testcase_args(
                args, self.case_file, suite.suite_tmp_path
            )

            client_options = self.add_effective_settings(client_options)

            if not is_valid_utf_8(self.case_file) or (
                self.reference_file and not is_valid_utf_8(self.reference_file)
            ):
                proc, total_time = self.run_single_test(
                    args.server_logs_level, client_options
                )

                result = self.process_result_impl(proc, total_time)
                result.check_if_need_retry(
                    args, result.description, result.description, self.runs_count
                )
                # to avoid breaking CSV parser
                result.description = result.description.replace("\0", "")
            else:

                @contextmanager
                def switch_to_smt():
                    with (
                        SharedEngineReplacer(
                            args,
                            self.case_file,
                            args.replace_replicated_with_shared,
                            args.replace_non_replicated_with_shared,
                            False,
                            args.cloud,
                        ) as test_replacer,
                        SharedEngineReplacer(
                            args,
                            self.reference_file,
                            # cloud_mode_engine=2 in cloud, expect all engines to be Shared*
                            args.replace_replicated_with_shared or args.cloud,
                            args.replace_non_replicated_with_shared or args.cloud,
                            True,
                            args.cloud,
                        ) as reference_replacer,
                    ):
                        old_case_file = self.case_file
                        old_reference_file = self.reference_file
                        self.case_file = test_replacer.get_path()
                        self.reference_file = reference_replacer.get_path()
                        try:
                            yield
                        finally:
                            self.case_file = old_case_file
                            self.reference_file = old_reference_file

                with switch_to_smt():
                    (
                        proc,
                        total_time,
                    ) = self.run_single_test(args.server_logs_level, client_options)

                    result = self.process_result_impl(proc, total_time)

                    result.check_if_need_retry(
                        args,
                        result.description,
                        result.description,
                        self.runs_count,
                    )
                    # to avoid breaking CSV parser
                    result.description = result.description.replace("\0", "")
            if result.status == TestStatus.FAIL:
                result.description = self.add_info_about_settings(result.description)

            if args.cloud and result.status == TestStatus.FAIL:
                # If error is in list of skipped errors, mark this test as SKIPPED
                for skip_error in self.suite.cloud_skip_errors_list:
                    if skip_error in result.description:
                        result.status = TestStatus.SKIPPED
                        result.reason = FailureReason.NOT_SUPPORTED_IN_CLOUD_POSTFILTER
                        break

            if self.name in suite.blacklist_check:
                if result.status == TestStatus.OK:
                    result.status = TestStatus.NOT_FAILED
                    result.reason = FailureReason.NOT_FAILED
                if result.status == TestStatus.FAIL:
                    result.status = TestStatus.SKIPPED

            self._cleanup(result.status == TestStatus.OK)

            return result
        except KeyboardInterrupt as e:
            raise e
        except HTTPError:
            total_time = (datetime.now() - start_time).total_seconds()
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.INTERNAL_QUERY_FAIL,
                total_time,
                self.add_info_about_settings(
                    self.get_description_from_exception_info(sys.exc_info())
                ),
            )
        except socket.timeout:
            total_time = (datetime.now() - start_time).total_seconds()
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.TIMEOUT,
                total_time,
                self.add_info_about_settings(
                    self.get_description_from_exception_info(sys.exc_info())
                ),
            )
        except (ConnectionError, http.client.ImproperConnectionState):
            total_time = (datetime.now() - start_time).total_seconds()
            exc_description = self.add_info_about_settings(
                self.get_description_from_exception_info(sys.exc_info())
            )
            if not check_server_liveness(self.args):
                return TestResult(
                    self.name,
                    TestStatus.FAIL,
                    FailureReason.SERVER_DIED,
                    total_time,
                    exc_description,
                )
            # Server is alive — this was a transient network error.
            return TestResult(
                self.name,
                TestStatus.FAIL,
                FailureReason.EXIT_CODE,
                total_time,
                exc_description,
            )
        except Exception:
            total_time = (datetime.now() - start_time).total_seconds()
            return TestResult(
                self.name,
                TestStatus.UNKNOWN,
                FailureReason.INTERNAL_ERROR,
                total_time,
                self.get_description_from_exception_info(sys.exc_info()),
            )
        finally:
            self.remove_settings_from_env()

    def _cleanup(self, passed):
        args = self.testcase_args

        need_cleanup = not args.database
        if need_cleanup and args.no_drop_if_fail:
            need_cleanup = passed

        if not need_cleanup or args.no_drop:
            return

        time_passed = (datetime.now() - args.testcase_start_time).total_seconds()
        timeout = max(args.timeout - time_passed, 20)

        self._cleanup_database(args, timeout)
        shutil.rmtree(args.test_tmp_dir)

    def _cleanup_database(self, args, timeout):
        database = args.testcase_database

        # Check that the test cleaned up its tables.
        # Only for newly added tests. Please extend this check to the old tests as well.
        if self.case_file >= "02800":
            leftover_tables = (
                clickhouse_execute(
                    args,
                    f"SHOW TABLES FROM {database}",
                    timeout=timeout,
                    settings={
                        "log_comment": f'{args.testcase_basename}-{args.testcase_database}',
                    },
                )
                .decode()
                .replace("\n", ", ")
            )

            if len(leftover_tables) != 0:
                raise TestException(
                    f"The test should cleanup its tables ({leftover_tables}), otherwise it is inconvenient for running it locally."
                )

        drop_database_query = f"DROP DATABASE IF EXISTS {database}"
        if args.replicated_database:
            drop_database_query += " ON CLUSTER test_cluster_database_replicated"
        drop_database_query += " SYNC"

        # It's possible to get an error "New table appeared in database being dropped or detached. Try again."
        for _ in range(1, 60):
            try:
                clickhouse_execute(
                    args,
                    drop_database_query,
                    timeout=timeout,
                    settings={
                        "log_comment": f'{args.testcase_basename}-{args.testcase_database}',
                    },
                )
            except HTTPError as e:
                if need_retry(args, e.message, e.message, 0):
                    continue
                raise
            break

def parse_to_bytes(size_str):
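    """Convert a human-readable size such as '10GB' or '5 MiB' to bytes.

    Decimal and binary units are distinguished, e.g.:
        parse_to_bytes("10GB")  == 10_000_000_000
        parse_to_bytes("5 MiB") == 5_242_880
    """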
    units = {
        'B': 1, 'KB': 10**3, 'MB': 10**6, 'GB': 10**9, 'TB': 10**12,
        'KIB': 2**10, 'MIB': 2**20, 'GIB': 2**30, 'TIB': 2**40
    }

    # Use regex to split the number from the unit
    match = re.match(r"^([\d.]+)\s*([a-zA-Z]{1,3})$", size_str.strip())
    if not match:
        raise ValueError(
            f"Invalid size format: {size_str!r}. Use e.g. '10GB' or '5 MiB'."
        )

    number, unit = match.groups()
    if unit.upper() not in units:
        raise ValueError(f"Unknown size unit in {size_str!r}: {unit!r}")
    return int(float(number) * units[unit.upper()])

class TestSuite:
    @staticmethod
    def tests_in_suite_key_func(item: str) -> float:
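        # Sort-key sketch: with args.order == "asc", an (illustrative) name like
        # "00001_select.sql" maps to key 1; names without a parseable numeric
        # prefix fall into the 99997/99998 buckets at the end.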
        if args.order == "random":
            return random.random()

        reverse = 1 if args.order == "asc" else -1

        if -1 == item.find("_"):
            return 99998

        prefix, _ = item.split("_", 1)

        try:
            return reverse * int(prefix)
        except ValueError:
            return 99997

    @staticmethod
    def render_test_template(j2env, suite_dir, test_name):
        """
        Render template for test and reference file if needed
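
        e.g. "01234_foo.sql.j2" (an illustrative name) is rendered to
        "01234_foo.gen.sql", and a sibling "01234_foo.reference.j2" to
        "01234_foo.gen.reference".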
        """

        if j2env is None:
            return test_name

        test_base_name = removesuffix(test_name, ".sql.j2", ".sql")

        reference_file_name = test_base_name + ".reference.j2"
        reference_file_path = os.path.join(suite_dir, reference_file_name)
        if os.path.isfile(reference_file_path):
            tpl = j2env.get_template(reference_file_name)
            tpl.stream().dump(
                os.path.join(suite_dir, test_base_name) + ".gen.reference"
            )

        if test_name.endswith(".sql.j2"):
            tpl = j2env.get_template(test_name)
            generated_test_name = test_base_name + ".gen.sql"
            tpl.stream().dump(os.path.join(suite_dir, generated_test_name))
            return generated_test_name

        return test_name

    @staticmethod
    def read_test_tags_and_random_settings_limits(
        suite_dir: str, all_tests: List[str]
    ) -> Tuple[
        Dict[str, Set[str]], Dict[str, Dict[str, Tuple[int, int]]], Dict[str, int]
    ]:
        def get_comment_sign(filename):
            if filename.endswith(".sql") or filename.endswith(".sql.j2"):
                return "--"
            if (
                filename.endswith(".sh")
                or filename.endswith(".py")
                or filename.endswith(".expect")
            ):
                return "#"
            raise TestException(f"Unknown file_extension: {filename}")

        def parse_tags_from_line(line, comment_sign) -> Set[str]:
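            # e.g. a .sql test starting with "-- Tags: no-parallel, long" parses
            # to {"no-parallel", "long"} (.sh/.py/.expect tests use "#" instead).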
            if not line.startswith(comment_sign):
                return set()
            tags_str = line[len(comment_sign) :].lstrip()  # noqa: ignore E203
            tags_prefix = "Tags:"
            if not tags_str.startswith(tags_prefix):
                return set()
            tags_str = tags_str[len(tags_prefix) :]  # noqa: ignore E203
            tags = tags_str.split(",")
            tags = {tag.strip() for tag in tags}
            return tags

        def parse_memory_limits_from_line(line, comment_sign) -> Optional[int]:
            if not line.startswith(comment_sign):
                return None
            memory_limits_str = line[len(comment_sign) :].lstrip()
            memory_limits_prefix = "Memory limits:"
            if not memory_limits_str.startswith(memory_limits_prefix):
                return None
            memory_limits_str = memory_limits_str[len(memory_limits_prefix) :]
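            # e.g. "-- Memory limits: 10GB" yields parse_to_bytes("10GB"), i.e.
            # 10**10 bytes; leading whitespace is handled by parse_to_bytes().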
            return parse_to_bytes(memory_limits_str)

        def parse_random_settings_limits_from_line(
            line, comment_sign
        ) -> Dict[str, Tuple[int, int]]:
            if not line.startswith(comment_sign):
                return {}
            random_settings_limits_str = line[len(comment_sign) :].lstrip()
            random_settings_limits_prefix = "Random settings limits:"
            if not random_settings_limits_str.startswith(random_settings_limits_prefix):
                return {}
            random_settings_limits_str = random_settings_limits_str[
                len(random_settings_limits_prefix) :
            ]
            # limits are specified in the form 'setting1=(min, max); setting2=(min, max); ...'
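            # e.g. "max_threads=(1, 8); max_block_size=(100, 65536)" parses to
            # {"max_threads": (1, 8), "max_block_size": (100, 65536)}
            # (the setting names above are purely illustrative).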
            random_settings_limits = {}
            for setting_and_limit in random_settings_limits_str.split(";"):
                setting_and_limit = setting_and_limit.split("=")
                random_settings_limits[setting_and_limit[0].strip()] = make_tuple(
                    setting_and_limit[1]
                )
            return random_settings_limits

        def find_tag_line(lines, comment_sign):
            for line in lines:
                if line.startswith(comment_sign) and line[
                    len(comment_sign) :
                ].lstrip().startswith("Tags:"):
                    return line
            return ""

        def find_random_settings_limits_line(lines, comment_sign):
            for line in lines:
                if line.startswith(comment_sign) and line[
                    len(comment_sign) :
                ].lstrip().startswith("Random settings limits:"):
                    return line
            return ""

        def find_memory_limits_line(lines, comment_sign):
            for line in lines:
                if line.startswith(comment_sign) and line[
                    len(comment_sign) :
                ].lstrip().startswith("Memory limits:"):
                    return line
            return ""

        def load_tags_and_random_settings_limits_from_file(filepath):
            comment_sign = get_comment_sign(filepath)
            need_query_params = False
            with open(filepath, "r", encoding="utf-8") as file:
                try:
                    lines = file.readlines()
                    tag_line = find_tag_line(lines, comment_sign)
                    random_settings_limits_line = find_random_settings_limits_line(lines, comment_sign)
                    memory_limits_line = find_memory_limits_line(lines, comment_sign)
                except UnicodeDecodeError:
                    return [], {}, None
                try:
                    if filepath.endswith(".sql"):
                        for line in lines:
                            if "{CLICKHOUSE_" in line:
                                need_query_params = True
                except UnicodeDecodeError:
                    pass
            parsed_tags = parse_tags_from_line(tag_line, comment_sign)
            if need_query_params:
                parsed_tags.add("need-query-parameters")
            if filepath.endswith(".expect"):
                parsed_tags.add("test-type-expect")
            if ".cloud." in filepath:
                parsed_tags.add("cloud")

            random_settings_limits = parse_random_settings_limits_from_line(
                random_settings_limits_line, comment_sign
            )
            memory_limits = parse_memory_limits_from_line(
                memory_limits_line, comment_sign
            )

            return parsed_tags, random_settings_limits, memory_limits

        all_tags = {}
        all_random_settings_limits = {}
        all_memory_limits = {}
        start_time = datetime.now()
        for test_name in all_tests:
            (
                tags,
                random_settings_limits,
                memory_limits,
            ) = load_tags_and_random_settings_limits_from_file(
                os.path.join(suite_dir, test_name)
            )
            all_tags[test_name] = tags or set()
            if random_settings_limits:
                all_random_settings_limits[test_name] = random_settings_limits
            if memory_limits:
                all_memory_limits[test_name] = memory_limits
        elapsed = (datetime.now() - start_time).total_seconds()
        if elapsed > 1:
            print(
                f"Tags and random settings limits for suite {suite_dir} read in {elapsed:.2f} seconds"
            )
        return all_tags, all_random_settings_limits, all_memory_limits

    def __init__(self, args, suite_path: str, suite_tmp_path: str, suite: str):
        self.args = args
        self.suite_path: str = suite_path
        self.suite_tmp_path: str = suite_tmp_path
        self.suite: str = suite
        self.skip_list: List[str] = []
        self.cloud_skip_list: List[str] = []
        self.private_skip_list: List[str] = []
        self.cloud_skip_errors_list: List[str] = []
        # List of tests included in a blacklist.
        # They will still be run; if such a test succeeds, its status is changed
        # to NOT_FAILED to notify the PR author that the PR fixes this test.
        self.blacklist_check: List[str] = []

        if args.run_by_hash_num is not None and args.run_by_hash_total is not None:
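            # e.g. with run_by_hash_total=4 and run_by_hash_num=1, this runner
            # executes only tests whose stringhash(name) % 4 == 1, so the suite
            # is partitioned across CI jobs without overlap.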
            if args.run_by_hash_num > args.run_by_hash_total:
                raise TestException(
                    f"Incorrect run-by-hash split: value {args.run_by_hash_num} is greater than total {args.run_by_hash_total}"
                )

            def filter_func(x: str) -> bool:
                return bool(
                    stringhash(x) % args.run_by_hash_total == args.run_by_hash_num
                )

        else:

            def filter_func(x: str) -> bool:
                _ = x
                return True

        all_tests = list(self.get_selected_tests(filter_func))

        all_tags_and_random_settings_limits = (
            self.read_test_tags_and_random_settings_limits(self.suite_path, all_tests)
        )
        self.all_tags: Dict[str, Set[str]] = all_tags_and_random_settings_limits[0]
        self.all_random_settings_limits: Dict[str, Dict[str, Tuple[int, int]]] = (
            all_tags_and_random_settings_limits[1]
        )
        self.all_memory_limits: Dict[str, int] = all_tags_and_random_settings_limits[2]
        self.sequential_tests = []
        self.parallel_tests = []

        self.all_tests = self.apply_test_runs(all_tests)

        self.all_tests.sort(key=self.tests_in_suite_key_func)

        post_run_check_tests = []

        for test_name in self.all_tests:
            if "post-run-check" in self.all_tags[test_name]:
                # post-run-check tests are supposed to run after all normal tests have finished
                post_run_check_tests.append(test_name)
                continue
            if self.is_sequential_test(test_name):
                if not args.no_sequential:
                    self.sequential_tests.append(test_name)
            else:
                if not args.no_parallel:
                    self.parallel_tests.append(test_name)
        self.sequential_tests.extend(post_run_check_tests)

    def apply_test_runs(self, all_tests):
        test_runs = self.args.test_runs
        long_test_runs = max(
            int(self.args.test_runs * self.args.long_test_runs_ratio), 1
        )
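        # e.g. with test_runs=100 and long_test_runs_ratio=0.25 (illustrative
        # values), each ordinary test is queued 100 times and each "long" test
        # 25 times.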

        all_tests = map(
            lambda test: [test]
            * (long_test_runs if self.is_long_test(test) else test_runs),
            all_tests,
        )
        all_tests = [item for sublist in all_tests for item in sublist]
        return all_tests

    def is_long_test(self, test_name):
        if test_name not in self.all_tags:
            return False
        return "long" in self.all_tags[test_name]

    def is_sequential_test(self, test_name):
        if args.sequential:
            if any(s in test_name for s in args.sequential):
                return True

        if test_name not in self.all_tags:
            return False

        return ("no-parallel" in self.all_tags[test_name]) or (
            "sequential" in self.all_tags[test_name]
        )

    def get_selected_tests(self, filter_func):
        """
        Find all files with tests, filter, render templates
        """

        j2env = (
            jinja2.Environment(
                loader=jinja2.FileSystemLoader(self.suite_path),
                keep_trailing_newline=True,
            )
            if USE_JINJA
            else None
        )
        if j2env is not None:
            j2env.globals.update(product=itertools.product)

        for test_name in os.listdir(self.suite_path):
            if not is_test_from_dir(self.suite_path, test_name):
                continue
            if self.args.test and not any(
                re.search(pattern, test_name) for pattern in self.args.test
            ):
                continue
            if USE_JINJA and test_name.endswith(".gen.sql"):
                continue

            if not filter_func(test_name):
                continue
            test_name = self.render_test_template(j2env, self.suite_path, test_name)
            yield test_name

    @staticmethod
    def read_test_suite(args, suite_dir_name: str):
        def is_data_present():
            try:
                return int(clickhouse_execute(args, "EXISTS TABLE test.hits"))
            except Exception as e:
                print(
                    "Cannot check if dataset is available, assuming it's not: ", str(e)
                )
                return False

        base_dir = os.path.abspath(args.queries)
        tmp_dir = os.path.abspath(args.tmp)
        suite_path = os.path.join(base_dir, suite_dir_name)

        suite_re_obj = re.search("^[0-9]+_(.*)$", suite_dir_name)
        if not suite_re_obj:  # skip .gitignore and so on
            return None

        suite_tmp_path = os.path.join(tmp_dir, suite_dir_name)
        if not os.path.exists(suite_tmp_path):
            os.makedirs(suite_tmp_path)

        suite = suite_re_obj.group(1)

        if not os.path.isdir(suite_path):
            return None

        if "stateful" in suite and not args.no_stateful and not is_data_present():
            print("Won't run stateful tests because test data wasn't loaded.")
            return None
        if "stateless" in suite and args.no_stateless:
            print("Won't run stateless tests because they were manually disabled.")
            return None
        if "stateful" in suite and args.no_stateful:
            print("Won't run stateful tests because they were manually disabled.")
            return None

        return TestSuite(args, suite_path, suite_tmp_path, suite)


class ServerDied(Exception):
    pass


class GlobalTimeout(Exception):
    pass


def run_tests_array(
    all_tests_with_params: Tuple[
        List[str],
        int,
        TestSuite,
        bool,
        Namespace,
        multiprocessing.sharedctypes.Synchronized,
        multiprocessing.synchronize.Event,
        multiprocessing.sharedctypes.Synchronized,
        multiprocessing.managers.ListProxy,
        int
    ],
):
    (
        all_tests,
        num_tests,
        test_suite,
        is_concurrent,
        args,
        exit_code,
        server_died,
        workers_to_shed,
        restarted_tests,
        process_index
    ) = all_tests_with_params

    os.environ["LLVM_PROFILE_FILE"] = f"clickhouse-client-proc{process_index}-%2m.profraw"

    OP_SQUARE_BRACKET = colored("[", args, attrs=["bold"])
    CL_SQUARE_BRACKET = colored("]", args, attrs=["bold"])

    MSG_FAIL = (
        OP_SQUARE_BRACKET
        + colored(" FAIL ", args, "red", attrs=["bold"])
        + CL_SQUARE_BRACKET
    )
    MSG_UNKNOWN = (
        OP_SQUARE_BRACKET
        + colored(" UNKNOWN ", args, "yellow", attrs=["bold"])
        + CL_SQUARE_BRACKET
    )
    MSG_OK = (
        OP_SQUARE_BRACKET
        + colored(" OK ", args, "green", attrs=["bold"])
        + CL_SQUARE_BRACKET
    )
    MSG_SKIPPED = (
        OP_SQUARE_BRACKET
        + colored(" SKIPPED ", args, "cyan", attrs=["bold"])
        + CL_SQUARE_BRACKET
    )
    MSG_NOT_FAILED = (
        OP_SQUARE_BRACKET
        + colored(" NOT_FAILED ", args, "red", attrs=["bold"])
        + CL_SQUARE_BRACKET
    )

    MESSAGES = {
        TestStatus.FAIL: MSG_FAIL,
        TestStatus.UNKNOWN: MSG_UNKNOWN,
        TestStatus.OK: MSG_OK,
        TestStatus.SKIPPED: MSG_SKIPPED,
        TestStatus.NOT_FAILED: MSG_NOT_FAILED,
    }

    passed_total = 0
    skipped_total = 0
    failures_total = 0
    failures_chain = 0
    start_time = datetime.now()

    client_options = get_additional_client_options(args)

    if num_tests > 0:
        about = "about " if is_concurrent else ""
        proc_name = multiprocessing.current_process().name
        print(f"Running {about}{num_tests} {test_suite.suite} tests ({proc_name}).")

    while True:
        # Check whether this worker has been asked to shed (stop after finishing
        # its previous test). The check happens before popping the next test so
        # no work is lost — the remaining workers will pick up the queue.
        with workers_to_shed.get_lock():
            if workers_to_shed.value > 0:
                workers_to_shed.value -= 1
                break

        # All workers pop from the same shared list proxy, so an emptiness
        # check could race with another worker's pop: rely on IndexError to
        # detect an exhausted queue.
        try:
            case = all_tests.pop(0)
        except IndexError:
            break

        if server_died.is_set():
            stop_tests()
            raise ServerDied("Server died")

        if args.stop_time and time() > args.stop_time:
            print("\nStop tests run because global time limit is exceeded.\n")
            stop_tests()
            raise GlobalTimeout("Stop tests run because global time limit is exceeded")

        test_case = TestCase(test_suite, case, args, is_concurrent)

        try:
            description = ""
            test_case_name = removesuffix(test_case.name, ".gen", ".sql") + ": "
            # Buffer for output produced while stopping child processes, so it
            # does not get mixed into the test description.
            cleanup_output = io.StringIO()

            if is_concurrent:
                description = f"{test_case_name:72}"
            else:
                sys.stdout.flush()
                sys.stdout.write(f"{test_case_name:72}")
                # This flush is needed so that the name of a long-running test
                # is visible before it finishes. Don't do it in parallel mode,
                # so that output lines don't interleave.
                sys.stdout.flush()

            while True:
                # This is the upper-level timeout. It helps with completely
                # frozen processes, e.g. in case of gdb errors.
                def timeout_handler(_signum, _frame):
                    with redirect_stdout(cleanup_output):
                        stop_tests()
                    raise TimeoutError("Test execution timed out")

                signal.signal(signal.SIGALRM, timeout_handler)
                signal.alarm(int(args.timeout * 1.1) + 60)
                test_result = None
                try:
                    test_result = test_case.run(args, test_suite, client_options)
                    test_result = test_case.process_result(test_result, MESSAGES)
                except TimeoutError:
                    # Construct an explicit timeout result so the reporting code
                    # below never dereferences a None test_result.
                    test_result = TestResult(
                        test_case.name,
                        TestStatus.FAIL,
                        FailureReason.TIMEOUT,
                        args.timeout,
                        "\nTest execution timed out\n",
                    )
                    break
                finally:
                    signal.alarm(0)

                if not test_result or not test_result.need_retry:
                    break
                restarted_tests.append(test_result)

            # First print the description, then invoke the check-result logic
            description += test_result.description

            if description and not description.endswith("\n"):
                description += "\n"

            if cleanup_output.getvalue():
                description += cleanup_output.getvalue()
                if not cleanup_output.getvalue().endswith("\n"):
                    description += "\n"

            sys.stdout.write(description)
            sys.stdout.flush()

            if test_result.status == TestStatus.OK:
                passed_total += 1
                failures_chain = 0
            elif test_result.status == TestStatus.FAIL:
                failures_total += 1
                failures_chain += 1
                if test_result.reason == FailureReason.SERVER_DIED:
                    stop_tests()
                    server_died.set()
                    raise ServerDied("Server died")
            elif test_result.status == TestStatus.SKIPPED:
                skipped_total += 1

        except KeyboardInterrupt as e:
            print(colored("Break tests execution", args, "red"))
            stop_tests()
            raise e

        if failures_chain >= args.max_failures_chain:
            stop_tests()
            raise ServerDied("Max failures chain")

    if failures_total > 0:
        print(
            colored(
                f"\nHaving {failures_total} errors! {passed_total} tests passed."
                f" {skipped_total} tests skipped."
                f" {(datetime.now() - start_time).total_seconds():.2f} s elapsed"
                f" ({multiprocessing.current_process().name}).",
                args,
                "red",
                attrs=["bold"],
            )
        )
        exit_code.value = 1
    else:
        print(
            colored(
                f"\n{passed_total} tests passed. {skipped_total} tests skipped."
                f" {(datetime.now() - start_time).total_seconds():.2f} s elapsed"
                f" ({multiprocessing.current_process().name}).",
                args,
                "green",
                attrs=["bold"],
            )
        )

    sys.stdout.flush()


def check_server_liveness(args, max_retries=10) -> bool:
    """Check if server is responding, with retries and exponential back-off.

    Used to distinguish a real server death from a transient network failure
    (e.g. the server being briefly unresponsive due to memory pressure).
    Returns True if the server responds within the retry window, False otherwise.
    """
    for attempt in range(max_retries):
        try:
            clickhouse_execute(
                args, "SELECT 1 /*liveness check*/", max_http_retries=1, timeout=10
            )
            return True
        except Exception:
            if attempt < max_retries - 1:
                sleep(min(2**attempt, 10))
    return False


def get_server_memory_fraction(args) -> Optional[float]:
    """Return the fraction of max_server_memory_usage currently in use (0.0–1.0).

    Reads the server process RSS from /proc/<pid>/statm, which is more reliable
    than querying MemoryTracking from system.metrics — the latter can be far
    below actual RSS under sanitizer builds where jemalloc is disabled and the
    memory tracker is not corrected to match RSS.

    The max_server_memory_usage value is fetched once from the server and cached.

    Returns None when the server PID is unavailable, the limit is unconfigured
    (0), or the query/read fails.
    """
    # Cache the hard limit — it does not change at runtime.
    if not hasattr(get_server_memory_fraction, "_max_memory"):
        try:
            get_server_memory_fraction._max_memory = int(
                clickhouse_execute(
                    args,
                    "SELECT value FROM system.server_settings WHERE name = 'max_server_memory_usage'",
                    max_http_retries=1,
                    timeout=5,
                )
                .decode()
                .strip()
            )
        except Exception:
            return None

    maximum = get_server_memory_fraction._max_memory
    if maximum == 0:
        return None

    # Cache the server PID — it does not change during a test run.
    if not hasattr(get_server_memory_fraction, "_server_pid"):
        get_server_memory_fraction._server_pid = get_server_pid()
    pid = get_server_memory_fraction._server_pid
    if pid is None:
        return None

    try:
        # /proc/<pid>/statm fields: size resident shared text lib data dt (in pages)
        with open(f"/proc/{pid}/statm", "r") as f:
            parts = f.read().split()
        rss_pages = int(parts[1])
        rss_bytes = rss_pages * os.sysconf("SC_PAGE_SIZE")
        return rss_bytes / maximum
    except Exception:
        return None


def check_server_started(args):
    print("Connecting to ClickHouse server...", end="")

    sys.stdout.flush()
    retry_count = args.server_check_retries
    query = "SELECT version(), arrayStringConcat(groupArray(value), ' ') FROM system.build_options WHERE name IN ('GIT_HASH', 'GIT_BRANCH')"
    while retry_count > 0:
        try:
            res = (
                str(clickhouse_execute(args, query).decode())
                .strip()
                .replace("\t", " @ ")
            )
            print(" OK")
            print(f"Connected to server {res}")
            sys.stdout.flush()
            return True
        except (
            ConnectionError,
            http.client.ImproperConnectionState,
            SSLEOFError,
            HTTPError,
        ) as e:
            # When connecting to cloud sticky-host:
            # SSLEOFError :  [SSL: UNEXPECTED_EOF_WHILE_READING] EOF occurred in violation of protocol (_ssl.c:1007)

            # HTTPError:
            # Code: 503. Code: 439. DB::Exception: Cannot schedule a task: failed to start the thread (threads=2, jobs=2). (CANNOT_SCHEDULE_TASK)
            if args.hung_check:
                print("Connection error, will retry: ", str(e))
            else:
                print(".", end="")
            sys.stdout.flush()
            retry_count -= 1
            sleep(0.5)
            continue
        except TimeoutError:
            print("\nConnection timeout, will not retry")
            break
        except Exception as e:
            print(
                "\nUexpected exception, will not retry: ",
                type(e).__name__,
                ": ",
                str(e),
            )
            break

    print("\nAll connection tries failed")
    sys.stdout.flush()
    return False


class BuildFlags:
    THREAD = "tsan"
    ADDRESS = "asan"
    UNDEFINED = "ubsan"
    MEMORY = "msan"
    SANITIZE_COVERAGE = "sanitize-coverage"
    SANITIZERS = [THREAD, ADDRESS, UNDEFINED, MEMORY, SANITIZE_COVERAGE]
    DEBUG = "debug"
    RELEASE = "release"
    ORDINARY_DATABASE = "ordinary-database"
    POLYMORPHIC_PARTS = "polymorphic-parts"


# Release and non-sanitizer build
RELEASE_NON_SANITIZED = False
# Build with any sanitizer
SANITIZED = False


def collect_build_flags(args):
    global RELEASE_NON_SANITIZED
    global SANITIZED

    result = []

    value = clickhouse_execute(
        args, "SELECT value FROM system.build_options WHERE name = 'CXX_FLAGS'"
    )
    if b"-fsanitize=thread" in value:
        result.append(BuildFlags.THREAD)
        SANITIZED = True
    elif b"-fsanitize=address" in value:
        result.append(BuildFlags.ADDRESS)
        SANITIZED = True
    elif b"-fsanitize=undefined" in value:
        result.append(BuildFlags.UNDEFINED)
        SANITIZED = True
    elif b"-fsanitize=memory" in value:
        result.append(BuildFlags.MEMORY)
        SANITIZED = True
    elif b"-DSANITIZE_COVERAGE=1" in value:
        result.append(BuildFlags.SANITIZE_COVERAGE)

    value = clickhouse_execute(
        args, "SELECT value FROM system.build_options WHERE name = 'BUILD_TYPE'"
    )
    if b"Debug" in value:
        result.append(BuildFlags.DEBUG)
    elif b"RelWithDebInfo" in value or b"Release" in value:
        result.append(BuildFlags.RELEASE)

    RELEASE_NON_SANITIZED = result == [BuildFlags.RELEASE]

    value = clickhouse_execute(
        args,
        "SELECT value FROM system.settings WHERE name = 'allow_deprecated_database_ordinary'",
    )
    if value == b"1" or args.db_engine == "Ordinary":
        result.append(BuildFlags.ORDINARY_DATABASE)

    value = int(
        clickhouse_execute(
            args,
            "SELECT value FROM system.merge_tree_settings WHERE name = 'min_bytes_for_wide_part'",
        )
    )
    if value == 0:
        result.append(BuildFlags.POLYMORPHIC_PARTS)

    use_flags = clickhouse_execute(
        args,
        "SELECT name FROM system.build_options WHERE name like 'USE_%' AND value in ('ON', '1')",
    )
    for use_flag in use_flags.strip().splitlines():
        use_flag = use_flag.decode().lower()
        result.append(use_flag)

    system_processor = clickhouse_execute(
        args,
        "SELECT value FROM system.build_options WHERE name = 'SYSTEM_PROCESSOR' LIMIT 1",
    ).strip()
    if system_processor:
        result.append(f"cpu-{system_processor.decode().lower()}")

    return result


def collect_changed_merge_tree_settings(args):
    changed_settings = (
        clickhouse_execute(
            args,
            "SELECT name FROM system.merge_tree_settings WHERE changed",
        )
        .strip()
        .splitlines()
    )

    return list(map(lambda s: s.decode(), changed_settings))


def check_table_column(args, database, table, column):
    return (
        int(
            clickhouse_execute(
                args,
                f"""
    SELECT count()
    FROM system.columns
    WHERE database = '{database}' AND table = '{table}' AND name = '{column}'
    """,
            )
        )
        > 0
    )


def get_hostname(args):
    return clickhouse_execute(args, "SELECT hostname()").decode("utf-8")


def suite_key_func(item: str) -> Union[float, Tuple[int, str]]:
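    # e.g. "0_stateless" -> (0, "stateless"), so suites sort by numeric prefix.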
    if args.order == "random":
        return random.random()

    if -1 == item.find("_"):
        return 99998, ""

    prefix, suffix = item.split("_", 1)

    try:
        return int(prefix), suffix
    except ValueError:
        return 99997, ""


def extract_key(key: str) -> str:
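    # Note: `key` is appended verbatim to the shell command line, so callers can
    # pass extra flags and even pipes (see the listen_host check in main()).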
    return subprocess.getstatusoutput(
        args.extract_from_config + " --try --config " + args.configserver + key
    )[1]


def run_tests_process(*args_, **kwargs):
    return run_tests_array(*args_, **kwargs)


def do_run_tests(
    jobs,
    test_suite: TestSuite,
    args,
    exit_code,
    restarted_tests,
    server_died,
    runner_process_killed,
):
    print(
        "Found",
        len(test_suite.parallel_tests),
        "parallel tests and",
        len(test_suite.sequential_tests),
        "sequential tests",
    )
    total_tests = len(test_suite.sequential_tests) + len(test_suite.parallel_tests)

    if test_suite.parallel_tests:
        tests_n = len(test_suite.parallel_tests)
        jobs = min(jobs, tests_n)

        # Without random shuffling, nearly the same groups of tests would run
        # concurrently every time. If one test in such a group is broken, it
        # affects the others in a non-random way: whenever the bad test fails,
        # the rest of the group tends to fail too, with a similar distribution
        # and number of failures. That makes genuinely flaky tests much harder
        # to tell apart from collateral failures.
        # TODO: add shuffle for sequential tests
        if jobs > 1:
            random.shuffle(test_suite.parallel_tests)

        batch_size = len(test_suite.parallel_tests) // jobs
        manager = multiprocessing.Manager()
        parallel_tests = manager.list()
        parallel_tests.extend(test_suite.parallel_tests)
        workers_to_shed = multiprocessing.Value('i', 0)

        processes = []
        for i in range(jobs):
            process = multiprocessing.Process(
                target=run_tests_process,
                args=(
                    (
                        parallel_tests,
                        batch_size,
                        test_suite,
                        True,
                        args,
                        exit_code,
                        server_died,
                        workers_to_shed,
                        restarted_tests,
                        i,  # process index
                    ),
                ),
            )
            processes.append(process)
            process.start()

        # How often (seconds) to query server memory usage.
        MEMORY_CHECK_INTERVAL = 5.0
        # After shedding a worker, wait this long before the next check to give
        # memory time to drop before potentially shedding another worker.
        MEMORY_SHED_COOLDOWN = 30.0
        # Fraction of max_server_memory_usage at which we start shedding workers.
        MEMORY_PRESSURE_THRESHOLD = 0.8
        last_memory_check = time()

        while processes:
            sleep(0.1)
            sys.stdout.flush()

            # Periodically check memory pressure and reduce concurrency if needed.
            # All workers share the same parallel_tests queue, so stopping one
            # from picking up new tests reduces future concurrency without losing
            # any already-queued work.
            now = time()
            if now - last_memory_check >= MEMORY_CHECK_INTERVAL and len(processes) > 1:
                last_memory_check = now
                memory_fraction = get_server_memory_fraction(args)
                if memory_fraction is not None and memory_fraction > MEMORY_PRESSURE_THRESHOLD:
                    # Cap outstanding shed requests so we never drain the pool
                    # to zero: only increment if the number of workers that will
                    # still be running after all pending sheds is greater than 1.
                    with workers_to_shed.get_lock():
                        if len(processes) - workers_to_shed.value > 1:
                            workers_to_shed.value += 1
                            print(
                                f"Server memory at {memory_fraction:.0%} of limit — "
                                f"signalling one worker to stop after its current test "
                                f"({len(processes)} tracked, {workers_to_shed.value} pending shed)"
                            )
                    # Use a longer cooldown so memory has time to drop before
                    # we consider shedding another worker.
                    last_memory_check = time() + MEMORY_SHED_COOLDOWN - MEMORY_CHECK_INTERVAL

            # Periodically check the server for hangs
            # and stop all processes in this case
            if args.hung_check:
                if not check_server_liveness(args):
                    print("Hung check failed: server is not responding")
                    server_died.set()

            if server_died.is_set():
                print("Server died, terminating all processes...")
                # Wait briefly for test results to be written
                sleep(5)
                for p in processes:
                    if p.is_alive():
                        p.terminate()
                # Wait for processes to exit, then force-kill stragglers
                for p in processes:
                    p.join(timeout=10)
                    if p.is_alive():
                        print(f"Force killing process {p.name} (pid {p.pid})")
                        p.kill()
                for p in processes:
                    p.join(timeout=5)
                break

            for p in processes[:]:
                if not p.is_alive():
                    # Check if process was killed with exception
                    if p.exitcode is not None and p.exitcode != 0:
                        print(
                            f"ERROR: Process {p.name} was killed with exit code {p.exitcode}"
                        )
                        runner_process_killed.set()
                    processes.remove(p)

    if test_suite.sequential_tests:
        run_tests_array(
            (
                list(test_suite.sequential_tests),
                len(test_suite.sequential_tests),
                test_suite,
                False,
                args,
                exit_code,
                server_died,
                multiprocessing.Value('i', 0),  # sequential runner is never shed
                restarted_tests,
                jobs + 1
            )
        )

    return total_tests


def is_test_from_dir(suite_dir, case):
    case_file = os.path.join(suite_dir, case)
    # We could also test for executable files (os.access(case_file, os.X_OK)),
    # but that interferes with 01610_client_spawn_editor.editor, which is invoked
    # as a query editor in the test and must be marked as executable.
    return os.path.isfile(case_file) and any(
        case_file.endswith(supported_ext) for supported_ext in TEST_FILE_EXTENSIONS
    )


def removesuffix(text, *suffixes):
    """
    str.removesuffix was added in Python 3.9:
    https://www.python.org/dev/peps/pep-0616/

    This version can work with several possible suffixes
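
    Only the first matching suffix is removed, e.g.:
        removesuffix("01234_test.gen.sql", ".gen", ".sql") == "01234_test.gen"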
    """
    for suffix in suffixes:
        if suffix and text.endswith(suffix):
            return text[: -len(suffix)]
    return text


def reportCoverageFor(args, what, query, permissive=False):
    value = clickhouse_execute(args, query).decode()

    if value != "":
        print(f"\nThe following {what} were not covered by tests:\n")
        print(value)
        print("\n")
        return permissive

    return True


# This is high-level coverage on a per-component basis (functions, data types, etc.).
# Not to be confused with code coverage.
def reportCoverage(args):
    clickhouse_execute(args, "SYSTEM FLUSH LOGS")

    return (
        reportCoverageFor(
            args,
            "functions",
            """
            SELECT name
            FROM system.functions
            WHERE NOT is_aggregate AND origin = 'System' AND alias_to = ''
                AND name NOT IN
                (
                    SELECT arrayJoin(used_functions) FROM system.query_log WHERE event_date >= yesterday()
                )
            ORDER BY name
        """,
            True,
        )
        and reportCoverageFor(
            args,
            "aggregate functions",
            """
            SELECT name
            FROM system.functions
            WHERE is_aggregate AND origin = 'System' AND alias_to = ''
                AND name NOT IN
                (
                    SELECT arrayJoin(used_aggregate_functions) FROM system.query_log WHERE event_date >= yesterday()
                )
            ORDER BY name
        """,
        )
        and reportCoverageFor(
            args,
            "aggregate function combinators",
            """
            SELECT name
            FROM system.aggregate_function_combinators
            WHERE NOT is_internal
                AND name NOT IN
                (
                    SELECT arrayJoin(used_aggregate_function_combinators) FROM system.query_log WHERE event_date >= yesterday()
                )
            ORDER BY name
        """,
        )
        and reportCoverageFor(
            args,
            "data type families",
            """
            SELECT name
            FROM system.data_type_families
            WHERE alias_to = '' AND name NOT LIKE 'Interval%'
                AND name NOT IN
                (
                    SELECT arrayJoin(used_data_type_families) FROM system.query_log WHERE event_date >= yesterday()
                )
            ORDER BY name
        """,
        )
    )


def reportLogStats(args):
    clickhouse_execute(args, "SYSTEM FLUSH LOGS")

    query = """
        WITH
            240 AS mins,
            (
                SELECT (count(), sum(length(toValidUTF8(message))))
                FROM system.text_log
                WHERE (now() - toIntervalMinute(mins)) < event_time
            ) AS total
        SELECT
            count() AS count,
            round(count / (total.1), 3) AS `count_%`,
            formatReadableSize(sum(length(toValidUTF8(message)))) AS size,
            round(sum(length(toValidUTF8(message))) / (total.2), 3) AS `size_%`,
            countDistinct(logger_name) AS uniq_loggers,
            countDistinct(thread_id) AS uniq_threads,
            groupArrayDistinct(toString(level)) AS levels,
            round(sum(query_id = '') / count, 3) AS `background_%`,
            message_format_string
        FROM system.text_log
        WHERE (now() - toIntervalMinute(mins)) < event_time
        GROUP BY message_format_string
        ORDER BY count DESC
        LIMIT 100
        FORMAT PrettySpaceNoEscapes
    """
    value = clickhouse_execute(args, query).decode(errors="replace")
    print("\nTop patterns of log messages:\n")
    print(value)
    print("\n")

    query = """
        WITH
            240 AS mins
        SELECT
            count() AS count,
            substr(replaceRegexpAll(toValidUTF8(message), '[^A-Za-z]+', ''), 1, 32) AS pattern,
            substr(any(toValidUTF8(message)), 1, 256) as runtime_message,
            any((extract(source_file, '/[a-zA-Z0-9_]+\\.[a-z]+'), source_line)) as line
        FROM system.text_log
        WHERE (now() - toIntervalMinute(mins)) < event_time AND message_format_string = ''
        GROUP BY pattern
        ORDER BY count DESC
        LIMIT 30
        FORMAT PrettySpaceNoEscapes
    """
    value = clickhouse_execute(args, query).decode(errors="replace")
    print("\nTop messages without format string (fmt::runtime):\n")
    print(value)
    print("\n")

    query = """
        SELECT message_format_string, count(), any(toValidUTF8(message)) AS any_message
        FROM system.text_log
        WHERE (now() - toIntervalMinute(240)) < event_time
        AND (message NOT LIKE (replaceRegexpAll(message_format_string, '{[:.0-9dfx]*}', '%') AS s))
        AND (message NOT LIKE concat('%Exception: ', s, '%'))
        GROUP BY message_format_string ORDER BY count() DESC LIMIT 20 FORMAT PrettySpaceNoEscapes
    """
    value = clickhouse_execute(args, query).decode(errors="replace")
    print("\nTop messages not matching their format strings:\n")
    print(value)
    print("\n")

    query = """
        WITH ('', '({}) Keys: {}', '({}) {}', 'Aggregating', 'Became leader', 'Cleaning queue',
              'Creating set.', 'Cyclic aliases', 'Detaching {}', 'Executing {}', 'Fire events: {}',
              'Found part {}', 'Loaded queue', 'No sharding key', 'No tables', 'Query: {}',
              'Removed', 'Removed part {}', 'Removing parts.', 'Request URI: {}', 'Sending part {}',
              'Sent handshake', 'Starting {}', 'Will mimic {}', 'Writing to {}', 'dropIfEmpty',
              'loadAll {}', '{} ({}:{})', '{} -> {}', '{} {}', '{}: {}', '{}%', 'Read object: {}',
              'New segment: {}', 'Convert overflow', 'Division by zero', 'Files set to {}',
              'Bytes set to {}', 'Numeric overflow', 'Invalid mode: {}',
              'Write file: {}', 'Unable to parse JSONPath', 'Host is empty in S3 URI.', 'Expected end of line',
              'inflate failed: {}{}', 'Center is not valid', 'Column ''{}'' is ambiguous', 'Cannot parse object', 'Invalid date: {}',
              'There is no cache by name: {}', 'No part {} in table', '`{}` should be a String', 'There are duplicate id {}',
              'Invalid replica name: {}', 'Unexpected value {} in enum', 'Unknown BSON type: {}', 'Point is not valid',
              'Invalid qualified name: {}', 'INTO OUTFILE is not allowed', 'Arguments must not be NaN', 'Cell is not valid',
              'brotli decode error{}', 'Invalid H3 index: {}', 'Too large node state size', 'No additional keys found.',
              'Attempt to read after EOF.', 'Replication was stopped', '{}	building file infos', 'Cannot parse uuid {}',
              'Query was cancelled', 'Cancelled merging parts', 'Cancelled mutating parts', 'Log pulling is cancelled',
              'Transaction was cancelled', 'Could not find table: {}', 'Table {} doesn''t exist',
              'Database {} doesn''t exist', 'Dictionary ({}) not found', 'Unknown table function {}',
              'Unknown format {}', 'Unknown explain kind ''{}''', 'Unknown setting {}', 'Unknown input format {}',
              'Unknown identifier: ''{}''', 'User name is empty', 'Expected function, got: {}',
              'Attempt to read after eof', 'String size is too big ({}), maximum: {}'
        ) AS known_short_messages
        SELECT count() AS c, message_format_string, substr(any(toValidUTF8(message)), 1, 120),
            min(if(length(regexpExtract(toValidUTF8(message), '(.*)\\([A-Z0-9_]+\\)')) as prefix_len > 0, prefix_len, length(toValidUTF8(message))) - 26 AS length_without_exception_boilerplate) AS min_length_without_exception_boilerplate
        FROM system.text_log
        WHERE (now() - toIntervalMinute(240)) < event_time
            AND (length(message_format_string) < 16
                OR (message ILIKE '%DB::Exception%' AND length_without_exception_boilerplate < 30))
            AND message_format_string NOT IN known_short_messages
        GROUP BY message_format_string ORDER BY c DESC LIMIT 50 FORMAT PrettySpaceNoEscapes
    """
    value = clickhouse_execute(args, query).decode(errors="replace")
    print("\nTop short messages:\n")
    print(value)
    print("\n")

    query = """
        SELECT max((freq, message_format_string)), level
        FROM (SELECT count() / (SELECT count() FROM system.text_log
              WHERE (now() - toIntervalMinute(240)) < event_time) AS freq,
              min(level) AS level, message_format_string FROM system.text_log
              WHERE (now() - toIntervalMinute(120)) < event_time
              GROUP BY message_format_string ORDER BY freq DESC)
        GROUP BY level
    """
    value = clickhouse_execute(args, query).decode(errors="replace")
    print("\nTop messages by level:\n")
    print(value)
    print("\n")


def try_get_skip_list(base_dir, name, remove_comment=True):
    test_names_to_skip = []
    skip_list_path = os.path.join(base_dir, name)
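    # File format: one test name per line; lines starting with a space are
    # skipped, and with remove_comment=True everything after the first
    # whitespace is dropped, e.g. "01234_some_test  flaky" -> "01234_some_test".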
    if not os.path.exists(skip_list_path):
        return test_names_to_skip

    with open(skip_list_path, "r", encoding="utf-8") as fd:
        for line in fd.read().split("\n"):
            if line == "" or line[0] == " ":
                continue
            if remove_comment:
                line = line.split()[0].strip()
                if line == "":
                    continue
            test_names_to_skip.append(line.strip())

    return test_names_to_skip


def main(args):
    exit_code = multiprocessing.Value("i", 0)
    server_died = multiprocessing.Event()
    runner_process_killed = multiprocessing.Event()
    multiprocessing_manager = multiprocessing.Manager()
    restarted_tests = multiprocessing_manager.list()
    tests_start_time = time()

    if not check_server_started(args):
        msg = "Server is not responding. Cannot execute 'SELECT 1' query."
        if args.hung_check:
            print(msg)
            pid = get_server_pid()
            print("Got server pid", pid)
            print_stacktraces()
        raise TestException(msg)

    if args.cloud:
        # When the client connects to a cloud instance, it connects to a random node.
        # But some .sh tests make multiple connections and depend on executing
        # queries on a single node, so a special annotation must be enabled for
        # sticky routing:
        # https://docs.dp.clickhouse-dev.com/infrastructure/sticky-routing.html
        # "annotations": {"clickhouse.com/sticky-routing-enabled": "true"}
        hostname = get_hostname(args)
        non_sticky_host = os.getenv("CLICKHOUSE_HOST")
        host = hostname + ".sticky." + os.getenv("CLICKHOUSE_HOST")
        os.environ["CLICKHOUSE_HOST"] = host
        os.environ["CLICKHOUSE_HOST_STICKY"] = host
        os.environ["CLICKHOUSE_HOST_NON_STICKY"] = non_sticky_host
        print("Sticky host:", host)

        user = os.getenv("CLICKHOUSE_CLIENT_USER")
        if user is None:
            raise ConfigException("CLICKHOUSE_CLIENT_USER env is not set")

        password = os.getenv("CLICKHOUSE_CLIENT_PASSWORD")
        if password is None:
            raise ConfigException("CLICKHOUSE_CLIENT_PASSWORD env is not set")

        config = f"""<config>
    <user>{user}</user>
    <password>{password}</password>
    <secure>true</secure>
</config>
        """
        client_config_path = os.path.abspath("clickhouse-client.xml")
        with open(client_config_path, "w", encoding="utf-8") as f:
            f.write(config)

        os.environ["CLICKHOUSE_CONFIG_CLIENT"] = client_config_path
        args.configclient = client_config_path

        # Wait until the sticky host is accessible.
        args_sticky = copy.deepcopy(args)
        args_sticky.tcp_host = host
        args_sticky.client += f" --host={host}"
        # Recommended time to wait: 10 minutes
        args_sticky.server_check_retries = 1500
        if not check_server_started(args_sticky):
            raise TestException(f"Sticky host '{host}' not responding")

    args.build_flags = collect_build_flags(args)
    args.changed_merge_tree_settings = collect_changed_merge_tree_settings(args)

    if args.s3_storage and (BuildFlags.RELEASE not in args.build_flags):
        args.no_random_settings = True

    if args.skip:
        args.skip = set(args.skip)

    if args.replace_replicated_with_shared and not args.skip:
        args.skip = set()

    base_dir = os.path.abspath(args.queries)

    # Keep same default values as in queries/shell_config.sh
    os.environ.setdefault("CLICKHOUSE_BINARY", args.binary)
    # os.environ.setdefault("CLICKHOUSE_CLIENT", args.client)
    os.environ.setdefault("CLICKHOUSE_CONFIG", args.configserver)

    if args.configclient:
        os.environ.setdefault("CLICKHOUSE_CONFIG_CLIENT", args.configclient)

    # Force printing server warnings to stderr.
    # Shell scripts could change the logging level.
    os.environ.setdefault("CLICKHOUSE_CLIENT_SERVER_LOGS_LEVEL", args.server_logs_level)

    # Note: time() is wall-clock, not monotonic; clock adjustments can skew the
    # global time limit.
    args.stop_time = None
    if args.global_time_limit:
        args.stop_time = time() + args.global_time_limit

    if args.zookeeper is None:
        args.zookeeper = True

    if args.shard is None:
        args.shard = bool(extract_key(' --key listen_host | grep -E "127.0.0.2|::"'))

    def create_common_database(args, db_name):
        create_database_retries = 0
        last_err = None
        while create_database_retries < MAX_RETRIES:
            start_time = datetime.now()
            try:
                clickhouse_execute(
                    args,
                    f"CREATE DATABASE IF NOT EXISTS {db_name} "
                    f"{get_db_engine(args, db_name)}",
                    settings=get_create_database_settings(args, None),
                )
                return
            except HTTPError as e:
                last_err = e
                total_time = (datetime.now() - start_time).total_seconds()
                if not need_retry(args, e.message, e.message, total_time):
                    break
            create_database_retries += 1

        if last_err:
            raise RuntimeError(f"Failed to create database {db_name} after {MAX_RETRIES} attempts") from last_err
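
    # Create the shared database up front, retrying transient failures
    # (via need_retry) instead of failing the whole run on a momentary error.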
    try:
        if args.database and args.database != "test":
            create_common_database(args, args.database)
        else:
            create_common_database(args, "test")
    except Exception as e:
        print(f"Failed to create databases for tests: {e}")
        server_died.set()
        raise

    if (
        args.collect_per_test_coverage
        and BuildFlags.SANITIZE_COVERAGE in args.build_flags
    ):
        clickhouse_execute(
            args,
            """
                CREATE TABLE IF NOT EXISTS system.coverage_log
                (
                    time DateTime,
                    test_name String,
                    coverage Array(UInt64),
                    symbol Array(String)
                ) ENGINE = MergeTree ORDER BY test_name
                COMMENT 'Contains information about per-test coverage from the CI, but used only for exporting to the CI cluster';
            """,
        )

        # Coverage collected at the system startup before running any tests:
        clickhouse_execute(
            args,
            "INSERT INTO system.coverage_log "
            "WITH arrayDistinct(arrayFilter(x -> x != 0, coverageCurrent())) AS coverage_distinct "
            "SELECT DISTINCT now(), '', coverage_distinct, "
            "arrayMap(x -> demangle(addressToSymbol(x)), coverage_distinct) ",
        )
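        # The empty test_name ('') distinguishes this startup baseline row from
        # the per-test rows collected while tests run.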

    total_tests_run = 0
    cloud_skip_list = try_get_skip_list(base_dir, "../queries-no-cloud-tests.txt")
    private_skip_list = try_get_skip_list(base_dir, "../queries-no-private-tests.txt")
    skip_list = []
    blacklist_check = []

    blacklist_check.extend(try_get_skip_list(base_dir, "../analyzer_tech_debt.txt"))

    if args.no_parallel_replicas:
        blacklist = try_get_skip_list(base_dir, "../parallel_replicas_blacklist.txt")
        blacklist_check.extend(blacklist)

    if args.no_async_insert:
        skip_list_async_inserts = try_get_skip_list(
            base_dir, "../async_insert_blacklist.txt"
        )
        blacklist_check.extend(skip_list_async_inserts)

    if os.getenv("CLOUD_ENABLE_PARALLEL_REPLICAS", "0") == "1":
        private_skip_list_parallel_replicas = try_get_skip_list(
            base_dir, "../queries-no-cloud-no-pr-tests.txt"
        )
        cloud_skip_list.extend(private_skip_list_parallel_replicas)

    # Some unsupported SQL patterns are hard to detect by parsing the test file,
    # so the test is executed first, and if its exception matches an entry in
    # this list, the test is skipped.
    cloud_skip_errors_list = [
        # SMT arguments restricted
        "Explicit replica_name is specified in SharedMergeTree arguments",
        # old engine syntax
        "Expected two string literal arguments: zookeeper_path and replica_name",
        # executable dicts are not supported
        "Dictionary source of type `executable` is disabled",
        # disable after statistics tests
        # "Create table with statistics is now disabled",
        "Moving tables between databases is not supported for Replicated engine. (NOT_IMPLEMENTED)",
        "Incorrect ATTACH TABLE query for Atomic database engine.",
        "Setting max_partitions_per_insert_block should not be changed.",
        # cloud_mode=1 restricts index_granularity=0
        "table cannot have index granularity (bytes) equals zero",
        # make_distributed_plan is added to constraints because it doesn't work yet
        "Setting make_distributed_plan should not be changed",
    ]

    if not args.shared_catalog:
        cloud_skip_errors_list += [
            # disable for SharedCatalog
            "CREATE AS SELECT is not supported with Replicated databases",
            # new message
            "CREATE AS SELECT and POPULATE is not supported with Replicated databases",
        ]
    if args.shared_catalog:
        # In rare cases DETACH TABLE has not finished yet, which is expected
        # https://github.com/ClickHouse/clickhouse-private/issues/23651
        cloud_skip_errors_list += ["Cannot attach table with UUID"]

    for suite in sorted(os.listdir(base_dir), key=suite_key_func):
        if server_died.is_set():
            break

        test_suite = TestSuite.read_test_suite(args, suite)
        if test_suite is None:
            continue

        test_suite.skip_list = skip_list
        test_suite.cloud_skip_list = cloud_skip_list
        test_suite.private_skip_list = private_skip_list
        test_suite.cloud_skip_errors_list = cloud_skip_errors_list
        test_suite.blacklist_check = blacklist_check
        total_tests_run += do_run_tests(
            args.jobs,
            test_suite,
            args,
            exit_code,
            restarted_tests,
            server_died,
            runner_process_killed,
        )

    if server_died.is_set():
        exit_code.value = 1

    if runner_process_killed.is_set():
        exit_code.value = 1

    if args.hung_check:
        # Some queries may keep executing in the background for a while after a test finishes. This is normal.
        print("Checking the hung queries: ", end="")
        hung_count = 0
        try:
            deadline = datetime.now() + timedelta(seconds=90)
            while datetime.now() < deadline:
                hung_count = get_processlist_size(args)
                if hung_count == 0:
                    print(" done")
                    break
                print(". ", end="", flush=True)
                sleep(1)
        except Exception as e:
            print(
                colored(
                    "\nHung check failed. Failed to get processlist size: " + str(e),
                    args,
                    "red",
                    attrs=["bold"],
                )
            )
            exit_code.value = 1

        processlist = ""
        if hung_count > 0:
            try:
                processlist = get_processlist_with_stacktraces(args)
            except Exception as e:
                print(
                    colored(
                        "\nHung check failed. Failed to get processlist with stacktraces: "
                        + str(e),
                        args,
                        "red",
                        attrs=["bold"],
                    )
                )
                exit_code.value = 1

        if processlist:
            print(
                colored(
                    "\nFound hung queries in processlist:", args, "red", attrs=["bold"]
                )
            )
            print(processlist.decode())
            print(get_transactions_list(args))

            print_stacktraces()
            exit_code.value = 1
        else:
            print(colored("\nNo queries hung.", args, "green", attrs=["bold"]))

    if len(restarted_tests) > 0:
        print("\nSome tests were restarted:\n")

        for test_result in restarted_tests:
            print(f"\n{test_result.case_name:72}: ")
            # replace it with lowercase to avoid parsing retried tests as failed
            for status in TestStatus:
                test_result.description = test_result.description.replace(
                    status.value, status.value.lower()
                )
            print(test_result.description)

    if total_tests_run == 0:
        print("No tests were run.")
        sys.exit(1)
    else:
        print("All tests have finished.")

    if int(clickhouse_execute(args, "EXISTS system.trace_log")):
        try:
            clickhouse_execute(args, "SYSTEM FLUSH LOGS system.trace_log")

            unchecked_memory = int(clickhouse_execute(args, f"""
            SELECT sum(size) FROM system.trace_log
            WHERE
                trace_type = 'MemoryAllocatedWithoutCheck'
                AND event_date >= toDate(fromUnixTimestamp({int(tests_start_time)}))
                AND event_time >= fromUnixTimestamp({int(tests_start_time)})
            """))
            if unchecked_memory > 1e9:
                print(f"""
                Too much memory has been allocated without checking for memory limits: {unchecked_memory / 1024**3:.2f} GiB.
                Take a look at:

                SELECT arrayStringConcat(symbols, '\\n'), sum(size)
                FROM system.trace_log
                WHERE trace_type = 'MemoryAllocatedWithoutCheck'
                    AND event_date >= toDate(fromUnixTimestamp({int(tests_start_time)}))
                    AND event_time >= fromUnixTimestamp({int(tests_start_time)})
                GROUP BY 1
                ORDER BY 2 DESC
                LIMIT 10
                FORMAT Vertical

                See also:
                - MemoryTrackerDebugBlockerInThread
                - AllocatorWithMemoryTracking
                """)
        except Exception as e:
            print(f"Failed get MemoryAllocatedWithoutCheck events from trace_log: {e}")

    if args.report_logs_stats:
        try:
            reportLogStats(args)
        except Exception as e:
            print(f"Failed to get stats about log messages: {e}")

    if args.report_coverage and not reportCoverage(args):
        exit_code.value = 1

    sys.exit(exit_code.value)


def find_binary(name):
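    """Resolve name to an executable: as-is, via PATH, via the standard bin
    directories, or from an in-source build. Example (paths illustrative):
    find_binary("clickhouse") -> "/usr/bin/clickhouse".
    """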
    def is_executable(path):
        return os.access(path, os.X_OK) and os.path.isfile(path)

    if is_executable(name):
        return name
    paths = os.environ.get("PATH", "").split(":")
    for path in paths:
        bin_path = os.path.join(path, name)
        if is_executable(bin_path):
            return bin_path

    # maybe it wasn't in PATH
    bin_path = os.path.join("/usr/local/bin", name)
    if is_executable(bin_path):
        return bin_path
    bin_path = os.path.join("/usr/bin", name)
    if is_executable(bin_path):
        return bin_path

    # Default binary path if source folder contains build
    bin_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"../build/programs/{name}")
    if is_executable(bin_path):
        return bin_path

    raise TestException(f"{name} was not found in PATH or standard locations")


def find_clickhouse_command(binary, command):
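    """Prefer the per-tool symlink, e.g. "clickhouse-client"; fall back to the
    multi-call form "clickhouse client" so that symlinks are not required
    (e.g. for binaries downloaded from CI).
    """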
    symlink = binary + "-" + command
    if os.access(symlink, os.X_OK):
        return symlink

    # To avoid requiring symlinks (in case you download binary from CI)
    return binary + " " + command


def get_additional_client_options(args):
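    """Merge --client-option values (each prefixed with "--") after any
    pre-set CLICKHOUSE_CLIENT_OPT. Example: --client-option max_threads=1
    log_queries=1 yields "--max_threads=1 --log_queries=1".
    """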
    if args.client_option:
        client_options = " ".join("--" + option for option in args.client_option)
        if "CLICKHOUSE_CLIENT_OPT" in os.environ:
            return os.environ["CLICKHOUSE_CLIENT_OPT"] + " " + client_options
        return client_options
    if "CLICKHOUSE_CLIENT_OPT" in os.environ:
        return os.environ["CLICKHOUSE_CLIENT_OPT"]
    return ""


def get_additional_client_options_url(args):
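    """Render --client-option values as URL query parameters. Example:
    --client-option max_threads=1 log_queries=1 yields
    "max_threads=1&log_queries=1".
    """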
    if args.client_option:
        return "&".join(args.client_option)
    return ""


def parse_args():
    parser = ArgumentParser(description="ClickHouse functional tests")
    parser.add_argument("-q", "--queries", help="Path to queries dir")
    parser.add_argument("--tmp", help="Path to tmp dir")

    parser.add_argument(
        "-b",
        "--binary",
        default="clickhouse",
        type=find_binary,
        help="Path to clickhouse binary or name of binary in PATH",
    )

    parser.add_argument("--extract_from_config", help="extract-from-config program")
    parser.add_argument(
        "--configclient", help="Client config (if you do not use default ports)"
    )
    parser.add_argument(
        "--configserver",
        default="/etc/clickhouse-server/config.xml",
        help="Preprocessed server config",
    )
    parser.add_argument(
        "-o", "--output", help="Output xUnit compliant test report directory"
    )
    parser.add_argument(
        "--memory-limit",
        type=int,
        default=0,
        help="Memory limit for each test (in bytes)",
    )
    parser.add_argument(
        "-t",
        "--timeout",
        type=int,
        default=600,
        help="Timeout for each test case in seconds",
    )
    parser.add_argument(
        "--global_time_limit",
        type=int,
        help="Stop if executing more than specified time (after current test is finished)",
    )
    parser.add_argument("test", nargs="*", help="Optional test case name regex")
    parser.add_argument(
        "-d",
        "--disabled",
        action="store_true",
        default=False,
        help="Also run disabled tests",
    )
    parser.add_argument(
        "--stop",
        action="store_true",
        default=None,
        dest="stop",
        help="Stop on network errors",
    )
    parser.add_argument(
        "--order", default="desc", choices=["asc", "desc", "random"], help="Run order"
    )
    parser.add_argument(
        "--testname",
        action="store_true",
        default=None,
        dest="testname",
        help="Make query with test name before test run",
    )
    parser.add_argument("--hung-check", action="store_true", default=False)
    parser.add_argument("--no-left-queries-check", action="store_true", default=False)
    parser.add_argument("--force-color", action="store_true", default=False)
    parser.add_argument(
        "--database", help="Database for tests (random name test_XXXXXX by default)"
    )
    parser.add_argument(
        "--no-drop-if-fail",
        action="store_true",
        help="Do not drop database for test if test has failed",
    )
    parser.add_argument(
        "--no-drop",
        action="store_true",
        help="Do not drop database",
    )

    parser.add_argument(
        "--hide-db-name",
        action="store_true",
        help='Replace random database name with "default" in stderr',
    )
    parser.add_argument(
        "--no-sequential", action="store_true", help="Not run no-parallel"
    )
    parser.add_argument(
        "--no-parallel", action="store_true", help="Run only no-parallel"
    )
    parser.add_argument(
        "-j", "--jobs", default=1, nargs="?", type=int, help="Run all tests in parallel"
    )
    parser.add_argument(
        "--test-runs",
        default=1,
        nargs="?",
        type=int,
        help="Run each test many times (run is considered as flaky check)",
    )
    parser.add_argument(
        "--long-test-runs-ratio",
        default=0.1,
        nargs="?",
        type=float,
        help="Ratio from --test-runs for long tests",
    )
    parser.add_argument(
        "-U",
        "--unified",
        default=10,
        type=int,
        help="output NUM lines of unified context",
    )
    parser.add_argument(
        "-r",
        "--server-check-retries",
        default=180,
        type=int,
        help="Num of tries to execute SELECT 1 before tests started",
    )
    parser.add_argument("--db-engine", help="Database engine name")
    parser.add_argument(
        "--replicated-database",
        action="store_true",
        default=False,
        help="Run tests with Replicated database engine",
    )
    parser.add_argument(
        "--fast-tests-only",
        action="store_true",
        default=False,
        help='Run only fast tests (the tests without the "no-fasttest" tag)',
    )
    parser.add_argument(
        "--no-stateless", action="store_true", help="Disable all stateless tests"
    )
    parser.add_argument(
        "--no-stateful", action="store_true", help="Disable all stateful tests"
    )
    parser.add_argument("--skip", nargs="+", help="Skip these tests")
    parser.add_argument(
        "--sequential",
        nargs="+",
        help="Run all tests sequentially",
    )
    parser.add_argument(
        "--no-long", action="store_true", dest="no_long", help="Do not run long tests"
    )
    parser.add_argument(
        "--record",
        action="store_true",
        default=False,
        help="Automatically update reference files when stdout differs from reference",
    )
    parser.add_argument(
        "--client-option", nargs="+", help="Specify additional client argument"
    )
    parser.add_argument(
        "--check-zookeeper-session",
        action="store_true",
        help="Check ZooKeeper session uptime to determine if failed test should be retried",
    )
    parser.add_argument(
        "--s3-storage",
        action="store_true",
        default=False,
        help="Run tests over s3 storage",
    )
    parser.add_argument(
        "--distributed-cache",
        action="store_true",
        default=False,
        help="Run tests with enabled distributed cache",
    )
    parser.add_argument(
        "--azure-blob-storage",
        action="store_true",
        default=False,
        help="Run tests over azure blob storage",
    )
    parser.add_argument(
        "--no-random-settings",
        action="store_true",
        default=False,
        help="Disable settings randomization",
    )
    parser.add_argument(
        "--no-random-merge-tree-settings",
        action="store_true",
        default=False,
        help="Disable MergeTree settings randomization",
    )
    parser.add_argument(
        "--llvm-coverage",
        action="store_true",
        default=False,
        help="This run will be used to collect code coverage with llvm-cov",
    )
    # Explicit, non-random settings (copy-paste friendly)
    parser.add_argument(
        "--settings",
        type=str,
        help='Explicit ClickHouse settings to use. Paste the CI blob inside quotes, e.g.: '
             '--settings "--max_threads 1 --fsync_metadata 1 ..."',
    )
    parser.add_argument(
        "--settings-file",
        type=str,
        help="Path to a file containing ONLY the inner space-separated settings blob "
             '(e.g. "--max_threads 1 --fsync_metadata 1 ...").',
    )
    parser.add_argument(
        "--merge-tree-settings",
        type=str,
        help='Explicit MergeTree settings to use. Paste the CI blob inside quotes, e.g.: '
             '--merge-tree-settings "--index_granularity 8192 ..."',
    )
    parser.add_argument(
        "--merge-tree-settings-file",
        type=str,
        help="Path to a file containing ONLY the inner space-separated MergeTree settings blob."
    )
    parser.add_argument(
        "--run-by-hash-num",
        type=int,
        help="Run tests matching crc32(test_name) % run_by_hash_total == run_by_hash_num",
    )
    parser.add_argument(
        "--run-by-hash-total",
        type=int,
        help="Total test groups for crc32(test_name) % run_by_hash_total == run_by_hash_num",
    )
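    # Together these shard the test set across workers by name: a worker started
    # with --run-by-hash-total 4 --run-by-hash-num 1 runs only tests for which
    # crc32(test_name) % 4 == 1 (the hashing itself happens during test
    # selection; zlib provides crc32, per the import at the top).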

    parser.add_argument(
        "--show-whitespaces-in-diff",
        action="store_true",
        help="Display $ characters after line with trailing whitespaces in diff output",
    )
    parser.add_argument(
        "--encrypted-storage",
        action="store_true",
        default=False,
        help="Run tests over encrypted storage",
    )

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--cloud",
        action="store_true",
        default=None,
        dest="cloud",
        help="Run only tests that are supported in ClickHouse Cloud environment",
    )

    group.add_argument(
        "--no-cloud",
        action="store_false",
        default=None,
        dest="cloud",
        help="Run all the tests, including the ones not supported in ClickHouse Cloud environment",
    )
    parser.set_defaults(cloud=False)

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--private",
        action="store_true",
        default=None,
        dest="private",
        help="Run only tests that are supported in the private build",
    )

    group.add_argument(
        "--no-private",
        action="store_false",
        default=None,
        dest="private",
        help="Run all the tests, including the ones not supported in the private build",
    )
    # Only used to skip tests via "../queries-no-private-tests.txt", so it's fine to keep it enabled by default
    parser.set_defaults(private=True)

    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--zookeeper",
        action="store_true",
        default=None,
        dest="zookeeper",
        help="Run zookeeper related tests",
    )
    group.add_argument(
        "--no-zookeeper",
        action="store_false",
        default=None,
        dest="zookeeper",
        help="Do not run zookeeper related tests",
    )
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument(
        "--shard",
        action="store_true",
        default=None,
        dest="shard",
        help="Run sharding related tests "
        "(required to clickhouse-server listen 127.0.0.2 127.0.0.3)",
    )
    group.add_argument(
        "--no-shard",
        action="store_false",
        default=None,
        dest="shard",
        help="Do not run shard related tests",
    )

    # TODO: Remove upgrade-check option after release 24.3 and use
    #       ignore_drop_queries_probability option in stress.py as in stress tests
    group.add_argument(
        "--upgrade-check",
        action="store_true",
        help="Run tests for further server upgrade testing by ignoring all"
        "drop queries in tests for collecting data from new version of server",
    )
    parser.add_argument(
        "--flaky-check",
        action="store_true",
        help="Enables CI flakiness detection by running new tests multiple times. This mode adds specific checks for the flaky-check job and requires the --test-runs argument.",
    )
    parser.add_argument(
        "--stress-tests",
        action="store_true",
        help="A CI test configuration that runs all possible test combinations without validating results. This is used to bypass specific test restrictions.",
    )
    parser.add_argument(
        "--secure",
        action="store_true",
        default=False,
        help="Use secure connection to connect to clickhouse-server",
    )
    parser.add_argument(
        "--max-failures-chain",
        default=20,
        type=int,
        help="Max number of failed tests in a row (stop tests if higher)",
    )
    parser.add_argument(
        "--report-coverage",
        action="store_true",
        default=False,
        help="Check what high-level server components were covered by tests",
    )
    parser.add_argument(
        "--collect-per-test-coverage",
        action="store_true",
        default=True,
        help="Create `system.coverage_log` table on the server and collect information about low-level code coverage on a per test basis there",
    )
    parser.add_argument(
        "--reset-coverage-before-every-test",
        action="store_true",
        default=True,
        help="Collect isolated test coverage for every test instead of a cumulative. Useful only when tests are run sequentially.",
    )
    parser.add_argument(
        "--report-logs-stats",
        action="store_true",
        default=False,
        help="Report statistics about log messages",
    )
    parser.add_argument(
        "--no-parallel-replicas",
        action="store_true",
        default=False,
        help="Do not include tests that are not supported with parallel replicas feature",
    )
    parser.add_argument(
        "--no-async-insert",
        action="store_true",
        default=False,
        help="Do not include tests that are not supported with async insert feature",
    )
    parser.add_argument(
        "--replace-replicated-with-shared",
        action="store_true",
        default=str_to_bool(os.environ.get("REPLACE_RMT_WITH_SMT", False)),
        help="Replace ReplicatedMergeTree engine with SharedMergeTree",
    )
    parser.add_argument(
        "--replace-non-replicated-with-shared",
        action="store_true",
        default=str_to_bool(os.environ.get("REPLACE_MT_WITH_SMT", False)),
        help="Replace ordinary MergeTree engine with SharedMergeTree",
    )
    parser.add_argument(
        "--replace-log-memory-with-mergetree",
        action="store_true",
        default=str_to_bool(os.environ.get("REPLACE_LOG_MEM_WITH_MT", False)),
        help="Replace *Log and Memory engines with MergeTree",
    )
    parser.add_argument(
        "--openssl-fips",
        action="store_true",
        default=False,
        help="Do not include tests that fail in OpenSSL FIPS mode",
    )
    parser.add_argument(
        "--shared-catalog",
        action="store_true",
        default=str_to_bool(os.environ.get("USE_SHARED_CATALOG", False)),
        help="Run tests with Shared Catalog",
    )
    parser.add_argument(
        "--shared-catalog-stress",
        action="store_true",
        default=False,
        help="Run tests stress tests with Shared Catalog",
    )
    parser.add_argument(
        "--capture-client-stacktrace",
        action="store_true",
        help="Capture stacktraces from clickhouse-client/local on errors",
    )
    parser.add_argument(
        "--trace", action="store_true", help="Capture various tracing info"
    )

    parser.add_argument(
        "--server-logs-level",
        # warning by default, error in cloud
        default="error" if parser.parse_known_args()[0].cloud else "warning",
        choices=[
            "none",
            "fatal",
            "critical",
            "error",
            "warning",
            "notice",
            "info",
            "debug",
            "trace",
            "test",
        ],
        help="Changes the CH setting 'send_logs_level'",
    )

    parser.add_argument(
        "--dont_retry_failures",
        action="store_true",
        help="Do not retry failed tests",
    )

    return parser.parse_args()


class Terminated(KeyboardInterrupt):
    def __init__(self, signum):
        self.signal = signum


def signal_handler(signum, frame):
    raise Terminated(signum)


if __name__ == "__main__":
    # Move to a new process group and kill it at exit so that we don't have any
    # infinite tests processes left
    # (new process group is required to avoid killing some parent processes)
    os.setpgid(0, 0)

    check_name = os.environ.get("CHECK_NAME")
    if check_name and "aarch" in check_name:
        multiprocessing.set_start_method("spawn")
        print(
            'The multiprocessing start method is set to "spawn". '
            "It is the default start method on macOS and should be tested in CI as well."
        )

    signal.signal(signal.SIGTERM, signal_handler)
    signal.signal(signal.SIGINT, signal_handler)
    signal.signal(signal.SIGHUP, signal_handler)

    try:
        args = parse_args()
    except Exception as e:
        print(e, file=sys.stderr)
        sys.exit(1)

    # When using --record, disable engine replacement to ensure we record
    # the original test's output, not a modified version
    if args.record:
        if args.replace_replicated_with_shared or args.replace_non_replicated_with_shared:
            print("INFO: Disabling engine replacement (--replace-replicated-with-shared, --replace-non-replicated-with-shared)", file=sys.stderr)
            print("      when using --record to ensure reference files are updated with original test output.", file=sys.stderr)
        args.replace_replicated_with_shared = False
        args.replace_non_replicated_with_shared = False

    # Collect explicit settings (if any)
    args.fixed_settings = {}
    args.fixed_merge_tree_settings = {}
    try:
        if getattr(args, "settings_file", None):
            args.fixed_settings.update(
                load_settings_from_file(args.settings_file, kind="settings")
            )
        if getattr(args, "settings", None):
            args.fixed_settings.update(parse_settings_cli_blob(args.settings))
        if getattr(args, "merge_tree_settings_file", None):
            args.fixed_merge_tree_settings.update(
                load_settings_from_file(args.merge_tree_settings_file, kind="merge-tree settings")
            )
        if getattr(args, "merge_tree_settings", None):
            args.fixed_merge_tree_settings.update(
                parse_settings_cli_blob(args.merge_tree_settings)
            )
    except Exception as _e:
        print(f"Failed to parse provided settings: {_e}", file=sys.stderr)
        sys.exit(1)

    if args.queries and not os.path.isdir(args.queries):
        print(
            f"Cannot access the specified directory with queries ({args.queries})",
            file=sys.stderr,
        )
        sys.exit(1)

    CAPTURE_CLIENT_STACKTRACE = args.capture_client_stacktrace

    # Autodetect the directory with queries if not specified
    if args.queries is None:
        args.queries = "queries"

    if not os.path.isdir(args.queries):
        # If we're running from the repo
        args.queries = os.path.join(
            os.path.dirname(os.path.abspath(__file__)), "queries"
        )

    if not os.path.isdir(args.queries):
        # Next we're going to try some system directories, don't write 'stdout' files into them.
        if args.tmp is None:
            args.tmp = "/tmp/clickhouse-test"

        args.queries = "/usr/local/share/clickhouse-test/queries"

    if not os.path.isdir(args.queries):
        args.queries = "/usr/share/clickhouse-test/queries"

    if not os.path.isdir(args.queries):
        args.queries = "/repo/tests/queries"

    if not os.path.isdir(args.queries):
        print(
            "Failed to detect path to the queries directory. Please specify it with "
            "'--queries' option.",
            file=sys.stderr,
        )
        sys.exit(1)

    print("Using queries from '" + args.queries + "' directory")

    if args.tmp is None:
        args.tmp = args.queries

    args.client = find_clickhouse_command(args.binary, "client")

    if args.extract_from_config:
        print(
            "WARNING: --extract_from_config option is deprecated and will be removed the the future",
            file=sys.stderr,
        )
    args.extract_from_config = find_clickhouse_command(
        args.binary, "extract-from-config"
    )

    if args.configclient:
        args.client += " --config-file=" + args.configclient

    tcp_host = os.getenv("CLICKHOUSE_HOST")
    if tcp_host is not None:
        args.tcp_host = tcp_host
        args.client += f" --host={tcp_host}"
    else:
        args.tcp_host = "localhost"

    if args.cloud:
        args.secure = True

    tcp_port = os.getenv("CLICKHOUSE_PORT_TCP")
    if tcp_port is not None:
        args.tcp_port = int(tcp_port)
        args.client += f" --port={tcp_port}"
    else:
        args.tcp_port = 9440 if args.secure else 9000
        if args.secure:
            os.environ["CLICKHOUSE_PORT_TCP"] = str(args.tcp_port)

    http_port = os.getenv("CLICKHOUSE_PORT_HTTP")
    if http_port is not None:
        args.http_port = int(http_port)
    else:
        args.http_port = 8443 if args.secure else 8123
        os.environ["CLICKHOUSE_PORT_HTTP"] = str(args.http_port)

    if args.secure and os.getenv("CLICKHOUSE_PORT_HTTP_PROTO") is None:
        os.environ["CLICKHOUSE_PORT_HTTP_PROTO"] = "https"

    client_database = os.getenv("CLICKHOUSE_DATABASE")
    if client_database is not None:
        args.client += f" --database={client_database}"
        args.client_database = client_database
    else:
        args.client_database = "default"

    if args.upgrade_check:
        args.client += " --fake-drop"

    if args.client_option or args.secure:
        # Set options for client
        if "CLICKHOUSE_CLIENT_OPT" in os.environ:
            os.environ["CLICKHOUSE_CLIENT_OPT"] += " "
        else:
            os.environ["CLICKHOUSE_CLIENT_OPT"] = ""

        os.environ["CLICKHOUSE_CLIENT_OPT"] += get_additional_client_options(args)

        if args.secure:
            os.environ["CLICKHOUSE_CLIENT_OPT"] += " --secure "

    if args.cloud:
        enable_analyzer = os.getenv("CLOUD_ENABLE_ANALYZER", "1")
        enable_pr = os.getenv("CLOUD_ENABLE_PARALLEL_REPLICAS", "0")
        print("analyzer:", enable_analyzer)
        print("pr:", enable_pr)
        query_params = {
            # These two are enabled by default in dev env
            "allow_experimental_analyzer": enable_analyzer,
            "allow_experimental_parallel_reading_from_replicas": enable_pr,
            "allow_suspicious_types_in_order_by": 1,
            "distributed_ddl_output_mode": "none",
            # DETACH TABLE is not allowed for Replicated databases.
            #   Use DETACH TABLE PERMANENTLY or SYSTEM RESTART REPLICA or
            #   set database_replicated_always_detach_permanently to 1.
            "database_replicated_always_detach_permanently": 1,
            # Break some tests
            "async_insert_deduplicate": 0,
            "insert_deduplicate": 0,
            "alter_sync": 2,
            "mutations_sync": 2,
            # Test 00765_locate.sql
            "function_locate_has_mysql_compatible_argument_order": 1,
            "input_format_try_infer_datetimes_only_datetime64": "false",
            # Check this setting with the analyzer enabled, so we test both outcomes;
            # its behaviour doesn't depend on the analyzer
            "database_replicated_allow_replicated_engine_arguments": 0,
        }
        cloud_settings = " ".join([f"--{k}={v}" for k, v in query_params.items()])

        os.environ["CLICKHOUSE_CLIENT_OPT"] = (
            cloud_settings + os.environ["CLICKHOUSE_CLIENT_OPT"] + " "
        )

        args.replace_replicated_with_shared = False
        args.replace_non_replicated_with_shared = False
        args.server_logs_level = "error"

        # Set options for curl
        if "CLICKHOUSE_URL_PARAMS" in os.environ:
            os.environ["CLICKHOUSE_URL_PARAMS"] += "&"
        else:
            os.environ["CLICKHOUSE_URL_PARAMS"] = ""

        cloud_settings_query = "&".join([f"{k}={v}" for k, v in query_params.items()])
        os.environ["CLICKHOUSE_URL_PARAMS"] += cloud_settings_query + "&"

        client_options_query_str = get_additional_client_options_url(args)
        args.client_options_query_str = client_options_query_str + "&"
        args.client_options_query_str += os.environ["CLICKHOUSE_URL_PARAMS"]
        os.environ["CLICKHOUSE_URL_PARAMS"] += client_options_query_str
    else:
        args.client_options_query_str = ""

    if args.jobs is None:
        args.jobs = multiprocessing.cpu_count()

    if args.db_engine and args.db_engine == "Ordinary":
        MESSAGES_TO_RETRY.append(" locking attempt on ")

    if args.replace_replicated_with_shared:
        args.s3_storage = True

    if args.encrypted_storage and not args.s3_storage and not args.azure_blob_storage:
        print(
            "Disabling encrypted storage. To run tests with encrypted storage, either s3_storage or azure_blob_storage should be enabled."
        )
        args.encrypted_storage = False

    try:
        main(args)
    except ServerDied as e:
        print(f"{e}", file=sys.stderr)
        sys.exit(1)
    except Terminated as e:
        print(f"Terminated with {e.signal} signal", file=sys.stderr)
        sys.exit(128 + e.signal)
    except KeyboardInterrupt:
        print("Interrupted")
        sys.exit(128 + signal.SIGINT)
