#!/usr/bin/python3
# pylint: disable=too-many-lines

"""Foomuuri - Multizone bidirectional nftables firewall.

Copyright 2023-2025, Kim B. Heino, Foobar Oy <b@bbbs.net>

License: GPL-2.0-or-later
"""

import collections
import concurrent.futures
import datetime
import ipaddress
import itertools
import json
import os
import pathlib
import re
import select
import shlex
import signal
import socket
import subprocess
import sys
import syslog
import time
import unicodedata
import dbus
import dbus.mainloop.glib
import dbus.service
from gi.repository import GLib

# requests is optional, used by iplist http download
try:
    import requests
    HAVE_REQUESTS = True
except ImportError:
    HAVE_REQUESTS = False

# lxml is optional, used by iplist xml and html filters
try:
    import lxml.etree
    HAVE_LXML = True
except ImportError:
    HAVE_LXML = False

# SystemD notify support is optional
try:
    from systemd.daemon import notify
    HAVE_NOTIFY = True
except ImportError:
    HAVE_NOTIFY = False


VERSION = '0.29'

# Global configuration. String values below are defaults; they may be
# overwritten by foomuuri{} entries in config files (parse_config_foomuuri).
# For each "*_dir"/"*_file"/"*_bin" key, config_to_pathlib() adds a parallel
# "_"-prefixed key holding the pathlib.Path / argv-list form.
CONFIG = {
    # Parsed foomuuri{} from config files
    'log_rate': '1/second burst 3',
    'log_input': 'yes',
    'log_output': 'yes',
    'log_forward': 'yes',
    'log_rpfilter': 'yes',
    'log_invalid': 'no',
    'log_smurfs': 'no',
    'log_prefix': '$(szone)-$(dzone) $(statement)',
    'log_level': 'level info flags skuid',
    'localhost_zone': 'localhost',
    'dbus_zone': 'public',
    'rpfilter': 'yes',
    'counter': 'no',
    'set_size': '65535',
    'recursion_limit': '10000',
    'priority_offset': '5',
    'dbus_firewalld': 'no',
    'nft_bin': 'nft',
    'try-reload_timeout': '15',

    # Directories and files. Files are relative to state_dir.
    'etc_dir': '/etc/foomuuri',
    'share_dir': '/usr/share/foomuuri',
    'state_dir': '/var/lib/foomuuri',
    'run_dir': '/run/foomuuri',
    'good_file': 'good.fw',
    'next_file': 'next.fw',
    'dbus_file': 'dbus.fw',
    'resolve_file': 'resolve.fw',  # old iplist/resolve
    'iplist_file': 'iplist.fw',  # old iplist/resolve
    'iplist_manual_file': 'iplist-manual.fw',  # old iplist/resolve
    'iplist_cache_file': 'iplist-cache.json',
    'iplist_apply_file': 'iplist-cache.fw',
    'zone_file': 'zone',
    'monitor_statistics_file': 'monitor.statistics',

    # Parsed command line parameters - used internally
    'command': '',
    'parameters': [],
    'root_power': True,
    'verbose': 0,  # -2 = double-quiet, -1 = quiet, 0 = normal, >0 = verbose
    'force': 0,
    'fork': 0,
    'syslog': 0,
}

OUT = []       # Generated nftables ruleset / commands
LOGRATES = {}  # Lograte names and limits
HELPERS = []   # List of helpers: (helper-object, protocol, ports)


def fail(error=None, fatal=True):
    """Report an error message and exit unless fatal is False."""
    # Double-quiet (verbose == -2) suppresses even error output
    if error and CONFIG['verbose'] >= -1:
        message = f'Error: {error}'
        print(message, flush=True)
        if CONFIG['syslog']:
            syslog.syslog(syslog.LOG_ERR, message)
    if fatal:
        sys.exit(1)


def warning(text):
    """Print warning message unless running quiet."""
    if CONFIG['verbose'] < 0:
        return
    message = f'Warning: {text}'
    print(message, flush=True)
    if CONFIG['syslog']:
        syslog.syslog(syslog.LOG_WARNING, message)


def verbose(line, level=1):
    """Print line when verbosity is at least level, mirror to syslog."""
    if CONFIG['verbose'] < level:
        return
    print(line, flush=True)
    if CONFIG['syslog']:
        syslog.syslog(syslog.LOG_NOTICE, line)


def out(line):
    """Add single line to ruleset.

    Appends to the module-level OUT list of generated nftables commands.
    """
    OUT.append(line)


def run_program_rc(args, *, env=None, print_output=True, quiet=False):
    """Run external program and return its errorcode. Print its output.

    Args:
        args: argv list; empty/None is a no-op that returns 0.
        env: optional environment dict for the child process.
        print_output: print command output even on success.
        quiet: suppress all output printing.

    Returns:
        Child's return code, or 1 if the command could not be started
        or timed out.
    """
    if not args:
        return 0
    verbose(' '.join(map(str, args)))
    try:
        proc = subprocess.run(args, check=False, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, encoding='utf-8',
                              env=env, timeout=60)
    except (OSError, subprocess.TimeoutExpired) as error:
        # Stringify args before joining: shlex.join() raises TypeError on
        # non-str items (e.g. pathlib.Path), which would mask the real error.
        fail(f'Failed to run command "{shlex.join(map(str, args))}": {error}',
             False)
        return 1
    output = proc.stdout.rstrip()
    # Show output on failure, on request, or when verbose - unless quiet
    if (
            output and
            (proc.returncode or print_output or CONFIG['verbose'] > 0) and
            not quiet
    ):
        verbose(output, 0)
    return proc.returncode


def run_program_pipe(args, text_input):
    """Feed text_input to external program via stdin, return its stdout.

    Failure to start the program is fatal. A non-zero exit code prints
    a warning and returns None.
    """
    if not args:
        return ''
    try:
        result = subprocess.run(args, check=False, input=text_input,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT,
                                encoding='utf-8', timeout=60)
    except (OSError, subprocess.TimeoutExpired) as error:
        fail(f'Failed to run command "{" ".join(args)}": {error}')
    if not result.returncode:
        return result.stdout
    warning(f'Failed to run command "{" ".join(args)}": '
            f'return code {result.returncode}')
    return None


def run_program_shell(args, fileline):
    """Run args as a shell command and return its stripped output.

    Command failure is fatal. Parameter args must be a string, not list.
    """
    if not args:
        return ''
    try:
        result = subprocess.run(args, check=False, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, encoding='utf-8',
                                shell=True, timeout=60)
    except (OSError, subprocess.TimeoutExpired) as error:
        fail(f'{fileline}Failed to run command "{args}": {error}')
    if result.returncode:
        fail(f'{fileline}Failed to run command "{args}": '
             f'return code {result.returncode}')
    return result.stdout.rstrip()


def nft_command(cmd, **kwargs):
    """Run "nft cmd", wrapper to run_program_rc().

    Extra keyword arguments are passed through to run_program_rc().
    """
    return run_program_rc(CONFIG['_nft_bin'] + [cmd], **kwargs)


def nft_json(cmd):
    """Run "nft --json cmd", return its output as json, ignore errors.

    Returns {} for empty cmd, None on any run or parse failure.
    """
    if not cmd:
        return {}
    args = CONFIG['_nft_bin'] + ['--json', cmd]
    verbose(' '.join(map(str, args)), 2)
    try:
        result = subprocess.run(args, check=False, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, encoding='utf-8',
                                timeout=60)
    except (OSError, subprocess.TimeoutExpired):
        return None
    if result.returncode:
        return None
    verbose(result.stdout, 2)
    try:
        return json.loads(result.stdout)
    except json.decoder.JSONDecodeError:
        return None


def daemonize():
    """Fork as a background daemon. Silently ignore errors.

    Classic double-fork: first fork lets the original parent exit,
    setsid() starts a new session detached from the controlling
    terminal, and the second fork guarantees the daemon can never
    reacquire one. Standard streams are redirected to /dev/null.
    """
    # Enabled in config?
    if not CONFIG['fork']:
        return
    CONFIG['fork'] = 0  # Fork only once!

    # Fork #1
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError:
        return

    # Run in a new session. Don't do "os.chdir('/')" so that relative paths
    # work in development.
    os.umask(0)
    os.setsid()

    # Fork #2
    try:
        pid = os.fork()
        if pid > 0:
            sys.exit(0)
    except OSError:
        return

    # Redirect stdin/out/err to /dev/null so stray reads/writes can't
    # touch the old terminal
    for stream in (sys.stdin, sys.stdout, sys.stderr):
        devnull = os.open(os.devnull, os.O_RDWR)
        os.dup2(devnull, stream.fileno())


def shell_expansion(content, fileline):
    """Expand $(shell command) in configuration file.

    This is the first expansion done. Failure in command is fatal.
    """
    while '$(shell ' in content:
        head, _, tail = content.partition('$(shell ')
        if ')' not in tail:
            first_line = tail.splitlines()[0]
            fail(f'{fileline}"$(shell" without ")" in command: {first_line}')
        command, _, rest = tail.partition(')')
        content = f'{head}{run_program_shell(command, fileline)}{rest}'
    return content


def find_config_files(basedir, mask):
    """Find all config files in basedir, ignoring hidden and backups."""
    ignored_prefixes = ('.', '#')
    return [path for path in sorted(basedir.rglob(mask))
            if not path.name.startswith(ignored_prefixes)]


def read_config():
    """Read all config files to config dict: section -> lines[].

    Files are read in alphabetical order, ignoring backup and hidden files.

    Returns:
        Dict of section name -> list of (fileline, tokens) tuples, plus
        special key "_section_line" mapping section name -> file/line
        string of the section's opening brace for error messages.
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements

    # Find all config files; missing /etc config is fatal
    share_config = find_config_files(CONFIG['_share_dir'], '*.conf')
    etc_config = find_config_files(CONFIG['_etc_dir'], '*.conf')
    if not etc_config:
        fail(f'No configuration files "{CONFIG["_etc_dir"]}/*.conf" found\n'
             '\n'
             'See https://github.com/FoobarOy/foomuuri/wiki/Host-Firewall '
             'for example\n'
             'configuration.')

    # These characters will combine to single word in shlex
    wordchars = ''.join(chr(letter) for letter in range(33, 256)
                        # excludes: " # ' ; { }
                        if letter not in (34, 35, 39, 59, 123, 125))

    # Read all config files
    config = {}        # Final config dict
    section = None     # Currently open section name
    section_line = {}  # Section_name -> filename_line for error messages
    for filename in share_config + etc_config:
        try:
            content = filename.read_text(encoding='utf-8')
        except PermissionError as error:
            fail(f'File (unknown): Can\'t read: {error}')

        # Expand $(shell in configuration file. Do this for whole file instead
        # of single line so that command can return multiple lines.
        content = shell_expansion(content, f'File (unknown): ')

        # Parse single config file content
        continuation = ''
        for linenumber, line in enumerate(content.splitlines()):
            if line == '# foomuuri: not-conf':
                break  # sysctl's 50-foomuuri.conf is not my config

            # Combine lines if there is \ at end of line
            if line.endswith('\\'):
                continuation += line[:-1] + ' '
                continue
            line = continuation + line
            continuation = ''

            # Parse single line to list of words. Keep " as is, it can be
            # used to avoid macro expansion.
            fileline = f'File (unknown) line {linenumber + 1}: '
            try:
                lexer = shlex.shlex(line, punctuation_chars=';{')
                lexer.wordchars = wordchars
                tokens = list(lexer)
            except ValueError as error:
                fail(f'{fileline}Can\'t parse line: {error}')
            if not tokens:
                continue

            # "}" is end of section
            if len(tokens) == 1 and tokens[0] == '}':  # End of section
                if not section:
                    fail(f'{fileline}Extra "}}"')
                section = None

            # Section start:
            # - "foo {"
            # - "template foo {" / "target foo" / "group foo"
            # - "prerouting {"
            # - "prerouting filter mangle - 10 {"
            elif len(tokens) >= 2 and tokens[-1] == '{':
                if section:
                    fail(f'{fileline}New "{" ".join(tokens)}" while section '
                         f'"{section}" is still open')
                if (
                        tokens[0] in ('template', 'target', 'group') and
                        len(tokens) != 3
                ):
                    fail(f'{fileline}Section "{tokens[0]}" must have single '
                         f' word name: {" ".join(tokens)}')
                if len(tokens) > 2 and tokens[0] not in (
                        'template', 'target', 'group',
                        'snat', 'dnat',
                        'prerouting', 'postrouting',
                        'forward', 'input', 'output'):
                    fail(f'{fileline}Section "{tokens[0]}" does not take '
                         f'parameters: {" ".join(tokens)}')

                # Same section may be re-opened later; lines accumulate
                section = ' '.join(tokens[:-1])
                if section.startswith('_'):  # _name is protected
                    fail(f'{fileline}Unknown section: {section}')
                if section not in config:
                    config[section] = []
                    section_line[section] = fileline

            # "foo" which is not inside section
            elif not section:
                fail(f'{fileline}Unknown line: {" ".join(tokens)}')

            # "foo" inside section
            else:
                config[section].append((fileline, tokens))

        # End of file checks
        if continuation:
            fail(f'File (unknown): Continuation "\\" at end of file')
        if section:
            fail(f'File (unknown): Section "{section}" is missing "}}" at '
                 f'end of file')

    # Include section_name -> filename_line to config for error messages
    config['_section_line'] = section_line
    return config


def config_to_pathlib():
    """Convert str paths in CONFIG{} to pathlib.Paths.

    For each plain key a parallel "_"-prefixed key is created. Order
    matters: "_state_dir" must exist before "*_file" keys are resolved.
    """
    plain_keys = [key for key in CONFIG if not key.startswith('_')]

    # "*_dir" are needed for reading config files
    for key in (name for name in plain_keys if name.endswith('_dir')):
        CONFIG[f'_{key}'] = pathlib.Path(CONFIG[key])

    # "*_file" are needed to save current state
    for key in (name for name in plain_keys if name.endswith('_file')):
        CONFIG[f'_{key}'] = CONFIG['_state_dir'] / CONFIG[key]

    # "*_bin" are binaries with optional arguments
    for key in (name for name in plain_keys if name.endswith('_bin')):
        CONFIG[f'_{key}'] = shlex.split(CONFIG[key])


def parse_config_templates(config, macros, macroline):
    """Parse "template foo { ... }" rules and convert them to macros.

    Each template becomes macro "_template_<name>" whose value is the
    template's lines joined with ";" separators.
    """
    for name in [key for key in config if key.startswith('template ')]:
        lines = config.pop(name)
        fileline = config['_section_line'][name]
        if not lines:
            fail(f'{fileline}Template "{name[9:]}" is empty')
        macro_name = f'_template_{name[9:]}'
        macroline[macro_name] = fileline
        tokens = []
        for _fileline, words in lines:
            if tokens:
                tokens.append(';')
            tokens.extend(words)
        macros[macro_name] = tokens


def parse_config_macros(config):
    """Parse macro{} from config. Recursively expand macro in macro{}.

    Returns:
        Dict of macro name -> list of replacement tokens. Templates are
        included as "_template_<name>" entries.
    """
    # Parse macro{} to dict
    macros = {}
    macroline = {}  # macro name -> file/line string for error messages
    parse_config_templates(config, macros, macroline)
    for fileline, macro in config.pop('macro', []):
        key = macro[0]
        value = macro[1:]
        if not value:
            fail(f'{fileline}Macro "{key}" does not have value')
        macroline[key] = fileline
        if value[0] == '+':  # append
            macros[key] = macros.get(key, []) + value[1:]
        else:
            # Warn about overwrites only on explicit "check" command
            if CONFIG['command'] == 'check' and macros.get(key):
                warning(f'{fileline}Overwriting macro "{key}" '
                        f'with value "{" ".join(value)}"')
            macros[key] = value  # overwrite

    # Expand macro in macro{}. Keep going as long as there was some expansion
    # done.
    while True:
        found = False
        for check, cvalue in macros.items():
            for macro, mvalue in macros.items():
                try:
                    pos = mvalue.index(check)  # Full word expansion only
                except ValueError:
                    continue
                if check == macro:  # Macro "foo" expands to "foo bar"
                    fail(f'{macroline[macro]}Macro "{macro}" expands to '
                         f'itself: {" ".join(mvalue)}')
                # Expand macro, one occurrence per pass
                macros[macro] = mvalue[:pos] + cvalue + mvalue[pos + 1:]
                found = True
        if not found:  # No new expansion was done
            return macros


def macro_isdigit(word, separator):
    """Check if word contains number after separator."""
    parts = word.split(separator)
    return len(parts) == 2 and parts[1].isdigit()


def change_template_to_macro_in_rule(fileline, line, macros):
    """Change "template foo" to "_template_foo".

    Modifies line in place. Unknown template names are fatal.
    """
    index = 0
    while index + 1 < len(line):
        if line[index] == 'template':
            line.pop(index)  # Drop keyword; the name shifts into index
            name = f'_template_{line[index]}'
            if name not in macros:
                fail(f'{fileline}Unknown template name: {line[index]}')
            line[index] = name
        index += 1


def expand_single_line(fileline, line, macros):
    """Expand first macro in line. Repeat call to expand all.

    Expansion can return multiple lines.
    Returns None if no expansion was done.
    """
    # Change "template foo" to macro call
    change_template_to_macro_in_rule(fileline, line, macros)

    # Iterate words in line
    for pos, word in enumerate(line):
        # Cleanup "-macro", "macro/24", "[macro]:123" and "macro:123"
        # to prefix/macro/suffix parts so the bare macro name is matched
        word_prefix = word_suffix = ''
        if word[0] == '-':  # Negative IP address
            word = word[1:]
            word_prefix = '-'
        if macro_isdigit(word, '/'):  # Netmask
            word, word_suffix = word.split('/')
            word_suffix = f'/{word_suffix}'
        if (
                word[0] == '[' and
                macro_isdigit(word, ']:') and
                not word_prefix and
                not word_suffix
        ):  # [IPv6]:port
            word, word_suffix = word[1:].split(']:')
            word_prefix = '['
            word_suffix = f']:{word_suffix}'
        if macro_isdigit(word, ':') and not word_suffix:  # IPv4:port
            word, word_suffix = word.split(':')
            word_suffix = f':{word_suffix}'

        # Check cleaned value
        if word not in macros:
            continue

        # Found, add prefix/suffix to all macro's value
        # (";" separators from templates are kept as-is)
        mvalue = []
        for item in macros[word]:
            if item == ';':
                mvalue.append(item)
            else:
                mvalue.append(f'{word_prefix}{item}{word_suffix}')

        # Expand mvalue to line and return list of expanded lines,
        # splitting the value on ";" into separate lines
        prefix = line[:pos]
        suffix = line[pos + 1:]
        return [(fileline, prefix + list(group) + suffix)
                for is_split, group in itertools.groupby(
                        mvalue, lambda spl: spl == ';') if not is_split]
    return None


def expand_macros(config):
    """Expand all macros in all config sections."""
    macros = parse_config_macros(config)
    for section, pending in config.items():
        # foomuuri{} and zone{} never contain macros; "_" keys are metadata
        if section.startswith('_') or section in ('foomuuri', 'zone'):
            continue

        finished = []
        loop_guard = collections.Counter()
        limit = int(CONFIG['recursion_limit'])
        while pending:
            # Expand macros in the first pending line
            fileline, line = pending.pop(0)
            expanded = expand_single_line(fileline, line, macros)

            # An expansion may produce new lines to re-scan; otherwise done
            if expanded:
                pending = expanded + pending
            else:
                finished.append((fileline, line))

            # Guard against endless macro/template expansion
            loop_guard[fileline] += 1
            if loop_guard[fileline] > limit:
                fail(f'{fileline}Possible macro or template loop '
                     f'in "{section}": {" ".join(line[:10])} ...')
        config[section] = finished


def remove_quotes(config):
    """Change "foo" to foo in config entries.

    This is called after macro expansion so that '"ssh"' is 'ssh', not
    'tcp 22'.
    """
    for section, lines in config.items():
        if section.startswith('_'):
            continue
        for _fileline, words in lines:
            for pos, word in enumerate(words):
                for quote in ('"', "'"):
                    if word.startswith(quote) and word.endswith(quote):
                        words[pos] = word[1:-1]
                        break


def parse_config_foomuuri(config):
    """Parse foomuuri{} from config to CONFIG{}.

    Also converts priority_offset to nft chain priority suffix
    ("", " + N" or " - N") and appends "packets" to bare log rates.
    """
    for fileline, line in config.pop('foomuuri', []):
        name = line[0]
        value = ' '.join(line[1:])
        if name not in CONFIG:
            fail(f'{fileline}Unknown foomuuri{{}} option: {" ".join(line)}')
        if value.startswith('+ '):  # append
            CONFIG[name] = f'{CONFIG[name]} {value[2:]}'
        else:
            CONFIG[name] = value  # overwrite
    config_to_pathlib()  # Redo as config files can change these

    # Convert chain priority offset to nft. It is already converted on D-Bus
    # handler reload.
    if not CONFIG['priority_offset']:
        priority = 0
    else:
        try:
            priority = int(CONFIG['priority_offset'].replace(' ', ''))
        except ValueError:
            fail(f'Invalid foomuuri{{}} priority_offset: '
                 f'{CONFIG["priority_offset"]}')
    if priority == 0:
        CONFIG['priority_offset'] = ''
    elif priority > 0:
        CONFIG['priority_offset'] = f' + {priority}'
    else:
        CONFIG['priority_offset'] = f' - {-priority}'

    # Add "packets" to log rates
    for key in list(CONFIG):
        if (
                key.startswith('log_') and
                re.match(r'^\d+/(second|minute|hour) burst \d+$', CONFIG[key])
        ):
            CONFIG[key] += ' packets'


def check_name(name, fileline, prefix=''):
    """Check that name starts with letter or underscore, not number."""
    if re.fullmatch(r'[a-zA-Z_]', name[:1]):
        return
    fail(f'{fileline}Invalid name: {prefix}{name}')


def parse_config_zones(config):
    """Parse zone{} from config.

    Returns dict of zone name -> {'interface': [interfaces]}.
    """
    zones = {}
    for fileline, line in config.pop('zone', []):
        name, interfaces = line[0], line[1:]
        check_name(name, fileline)
        if name in zones:
            fail(f'{fileline}Zone is already defined: {name}')
        zones[name] = {'interface': interfaces}
    return zones


def parse_config_zonemap(config):
    """Parse zonemap{} rules from config into list of rule dicts."""
    rules = []
    for fileline, line in config.pop('zonemap', []):
        rule = parse_rule_line((fileline, line))
        # A zonemap entry that maps nothing is a configuration mistake
        if not (rule['new_dzone'] or rule['new_szone']):
            fail(f'{fileline}Zonemap without "new_dzone" or "new_szone" '
                 f'is a no-op: {" ".join(line)}')
        rules.append(rule)
    return rules


def parse_config_special_chains(config):
    """Parse snat{}, dnat{}, prerouting{} etc. rules from config.

    Returns dict of (chain, type, priority) -> list of rule dicts.
    Sections may be plain ("snat") or carry explicit type and priority
    ("prerouting filter mangle - 10").
    """
    chain_rules = {
        # output+mangle is needed for mark restore in output_special_chains()
        ('output', 'route', 'mangle' + CONFIG['priority_offset']): [],
    }
    for prefix, def_type, def_priority in (
            ('snat', 'nat', 'srcnat'),
            ('dnat', 'nat', 'dstnat'),
            ('prerouting', 'filter', 'mangle'),
            ('postrouting', 'filter', 'mangle'),
            ('forward', 'filter', 'mangle'),
            ('input', 'filter', 'mangle'),
            ('output', 'route', 'mangle'),
            ('invalid', None, None),
            ('rpfilter', None, None),
            ('smurfs', None, None)):
        for section in list(config):
            if not (section == prefix or section.startswith(f'{prefix} ')):
                continue

            fileline = config['_section_line'][section]
            # maxsplit=2 keeps a multi-word priority like "mangle - 10" whole
            items = section.split(' ', 2)
            if len(items) == 2:
                fail(f'{fileline}Invalid section: type or priority missing: '
                     f'{section}')

            if len(items) == 1:  # Plain section name: use defaults
                ftype = def_type
                priority = None
                if def_priority:
                    priority = def_priority + CONFIG['priority_offset']
            else:  # Explicit "name type priority"
                ftype = items[1]
                priority = items[2]
                if ftype not in ('filter', 'nat', 'route'):
                    fail(f'{fileline}Invalid section type: {section}')

            lines = config.pop(section)
            chain_rules[(prefix, ftype, priority)] = [parse_rule_line(line)
                                                      for line in lines]
    return chain_rules


def parse_iplist_names(config):
    """Return list of resolve{} or iplist{} set names from config.

    Option lines (timeouts/refresh intervals) are skipped; every other
    entry must be a valid "@name".
    """
    names = set()
    for section in ('iplist', 'resolve'):
        for fileline, line in config.get(section, []):
            first = line[0]
            if first in ('timeout', 'refresh'):
                continue
            if first.startswith(('dns_timeout=', 'dns_refresh=',
                                 'url_timeout=', 'url_refresh=')):
                continue
            if first == '@' or not first.startswith('@'):
                fail(f'{fileline}Invalid {section} name: {" ".join(line)}')
            check_name(first[1:], fileline, '@')
            names.add(first)
    return names


def parse_config_hook(config):
    """Parse hook{} from config into CONFIG entries."""
    valid_hooks = ('pre_start', 'post_start', 'pre_stop', 'post_stop')
    for fileline, line in config.pop('hook', []):
        if line[0] not in valid_hooks:
            fail(f'{fileline}Unknown hook: {" ".join(line)}')
        CONFIG[line[0]] = line[1:]


def minimal_config():
    """Read and parse minimal config.

    Reads config files, expands macros, strips quotes and applies
    foomuuri{} options to CONFIG. Zones, rules etc. are left unparsed.
    """
    config = read_config()
    expand_macros(config)
    remove_quotes(config)
    parse_config_foomuuri(config)
    return config


def is_ipv4_address(value):
    """Is value IPv4 address, network or interval."""
    if value.count('-') == 1:  # Interval "IP-IP"
        start, end = value.split('-')
        return is_ipv4_address(start) and is_ipv4_address(end)
    try:  # Address "IP"
        return ipaddress.ip_address(value).version == 4
    except ValueError:
        pass
    try:  # Network "IP/mask"
        return ipaddress.ip_network(value, strict=False).version == 4
    except ValueError:
        return False


def is_ipv6_address(value):
    """Is value IPv6 address, network or interval."""
    if value.count('/-') == 1:  # Suffix mask "IP/-mask"
        addr, _, masktext = value.partition('/-')
        try:
            return 0 <= int(masktext) <= 128 and is_ipv6_address(addr)
        except ValueError:
            return False

    if value.count('-') == 1:  # Interval "IP-IP"
        start, end = value.split('-')
        return is_ipv6_address(start) and is_ipv6_address(end)

    # Python's ipaddress library doesn't handle "[ipv6]" notation.
    # Strip [] before validating the address. nft handles [] fine, it will
    # strip them.
    if value.startswith('['):
        if value.endswith(']'):  # "[ipv6]"
            value = value[1:-1]
        elif ']/' in value:      # "[ipv6]/56"
            value = value[1:].replace(']/', '/')

    try:  # Address "IP"
        return ipaddress.ip_address(value).version == 6
    except ValueError:
        pass
    try:  # Network "IP/mask"
        return ipaddress.ip_network(value, strict=False).version == 6
    except ValueError:
        return False


def is_ip_address(value):
    """Check if value is IPv4 or IPv6 address.

    Return 4, 6, or 0 if not detected.
    """
    # Negative ("-IP") form is handled in single_or_set(); strip it here
    candidate = value[1:] if value.startswith('-') else value
    if is_ipv4_address(candidate):
        return 4
    if is_ipv6_address(candidate):
        return 6
    return 0


def is_port(value, protocol):
    """Check if value is port: "1", "1-2" or "1,2", or any combination.

    This is used in parse_rule_line so protocol must specified.
    Allow special keywords for protocol icmp/icmpv6.
    """
    if not protocol:
        return False
    is_icmp = protocol in ('icmp', 'icmpv6')
    for item in value.split(','):
        # See: "nft describe icmp type; nft describe icmpv6 type"
        if is_icmp and (item == 'redirect' or
                        re.match(r'^[a-z2]{2,}-[-a-z]{5,}$', item)):
            continue
        if not all(part.isnumeric() for part in item.split('-')):
            return False
    return True


def rule_item_only_one(rule, keyword, value):
    """Verify that keyword is set only once, or set to same value."""
    marker = f'_only_one_{keyword}'
    previous = rule.get(marker, value)
    if previous != value:
        fail(f'{rule["fileline"]}Rule\'s {keyword} is already set to '
             f'"{previous}": {" ".join(rule["line"])}')
    rule[marker] = value


def verify_rule_sanity(rule, fileline):
    """Do some basic verify that single rule is valid.

    Normalizes rate limits in place (appends missing "packets").
    Any failed check is fatal.
    """
    # pylint: disable=too-many-branches
    # Most keywords require a value; the listed ones may be empty
    for key, value in rule.items():
        if value == '' and key not in ('to', 'queue', 'counter', 'log',
                                       'fileline', 'line'):
            fail(f'{fileline}"{key}" without value is not valid')

    # These keywords take exactly one word
    for key in ('saddr_rate_name', 'daddr_rate_name', 'saddr_daddr_rate_name',
                'helper', 'counter', 'mss'):
        value = rule[key] or ''
        if ' ' in value:
            fail(f'{fileline}"{key}" must be single word: {value}')

    for key in ('global_rate', 'saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
        if not rule[key]:
            continue
        # Limit by packets:
        #
        # 3/second
        # 3/second burst 5
        # 3/second burst 5 packets
        # over 3/second
        # over 3/second burst 5
        # over 3/second burst 5 packets
        #
        # Limit by bytes:
        #
        # 10 mbytes/second
        # 10 mbytes/second burst 12000 kbytes
        # over 10 mbytes/second
        # over 10 mbytes/second burst 12000 kbytes
        #
        # Limit by connection count
        #
        # ct count 8
        # ct count over 8
        if re.match(r'^(over )?\d+/(second|minute|hour) burst \d+$',
                    rule[key]):
            rule[key] += ' packets'  # Add missing "packets"
        if not (
                re.match(r'^(over )?'
                         r'\d+/(second|minute|hour)'
                         r'( burst \d+ packets)?$', rule[key]) or
                re.match(r'^(over )?'
                         r'\d+ [km]?bytes/(second|minute|hour)'
                         r'( burst \d+ [km]?bytes)?$', rule[key]) or
                re.match(r'^ct count (over )?\d+$', rule[key])
        ):
            fail(f'{fileline}Invalid "{key}" value: {rule[key]}')

    # Modifier keywords make no sense without their base keyword
    for basic, extra in (
            ('protocol', 'sport'),
            ('protocol', 'dport'),
            ('saddr_rate', 'saddr_rate_name'),
            ('saddr_rate', 'saddr_rate_mask'),
            ('daddr_rate', 'daddr_rate_name'),
            ('daddr_rate', 'daddr_rate_mask'),
            ('saddr_daddr_rate', 'saddr_daddr_rate_name'),
            ('saddr_daddr_rate', 'saddr_daddr_rate_mask'),
    ):
        if rule[extra] and not rule[basic]:
            fail(f'{fileline}"{extra}" without "{basic}" is not valid')

    # NAT statements need a target address
    if not rule['to'] and rule['statement'] in ('snat', 'dnat',
                                                'snat_prefix', 'dnat_prefix'):
        fail(f'{fileline}"{rule["statement"]}" without address is not valid')

    if rule['ct_status'] and rule['ct_status'] not in (
            'expected', 'seen-reply', 'assured', 'confirmed',
            'snat', 'dnat', 'dying',
    ):
        # Fixed error message: it previously rendered with a stray '"'
        # before the word Invalid
        fail(f'{fileline}Invalid "ct_status" value: {rule["ct_status"]}')


def parse_rule_line(fileline_line):
    """Parse single config section line to rule dict.

    This parser is quite relaxed. Words can be in almost any order. For
    example, all following entries are equal:
      tcp 22 log           <- preferred
      tcp 22 accept log
      accept tcp 22 log
      log tcp accept 22

    fileline_line is a (fileline, line) tuple: "fileline" is the error
    message location prefix, "line" is the rule already split into a list
    of words.  Returns a dict with one entry per supported rule keyword;
    keywords not present in the line keep their defaults (mostly None).
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    fileline, line = fileline_line
    ret = {
        # Basic rules
        'statement': 'accept',
        'cast': 'unicast',
        'protocol': None,
        'saddr': None,
        'sport': None,
        'daddr': None,
        'dport': None,
        'oifname': None,
        'iifname': None,
        'mac_saddr': None,
        'mac_daddr': None,
        # Is this IPv4/6 specific rule?
        'ipv4': False,
        'ipv6': False,
        # Rate limits
        'global_rate': None,
        'saddr_rate': None,
        'saddr_rate_mask': None,
        'saddr_rate_name': None,
        'daddr_rate': None,
        'daddr_rate_mask': None,
        'daddr_rate_name': None,
        'saddr_daddr_rate': None,
        'saddr_daddr_rate_mask': None,
        'saddr_daddr_rate_name': None,
        # User limits
        'uid': None,
        'gid': None,
        # Zonemap specific rules
        'szone': None,
        'dzone': None,
        'new_szone': None,
        'new_dzone': None,
        # Misc rules
        'to': None,  # snat/dnat to
        'queue': None,  # optional queue flags
        'counter': None,
        'helper': None,
        'sipsec': None,
        'dipsec': None,
        'log': None,
        'log_level': None,
        'nft': None,
        'mss': None,
        'template': None,
        'tproxy': None,
        'mark_set': None,
        'mark_match': None,
        'priority_set': None,
        'priority_match': None,
        'dscp': None,
        'cgroup': None,
        'ct_status': None,
        'time': None,
        'after_conntrack': True,

        # Internal housekeeping
        'plain': True,  # Plain "log" or "counter" without anything else
        'fileline': fileline,  # For error messages
        'line': line,  # Original line for error messages
    }

    # State machine: "keyword" is the start keyword currently collecting
    # parameter words, or None when the next word must stand on its own.
    keyword = None
    for item in line:
        if item == ';':
            fail(f'{fileline}";" is not supported in rule, split it to '
                 f'separate lines: {" ".join(line)}')

        # "tcp 22" is shortcut for "tcp dport 22"
        if not keyword and is_port(item, ret['protocol']):
            keyword = 'dport'
            ret['plain'] = False
            if ret[keyword] is None:
                ret[keyword] = ''

        # Version 0.29 deprecates 'to' after 'snat' statement, ignore it
        if keyword == 'to' and not ret['to'] and item == 'to':
            continue

        # First item after start keyword is always a parameter for it, except
        # for "log" or "counter". Log will have good default value if not
        # defined. Counter without parameter will create anonymous counter.
        if keyword and not ret[keyword] and keyword not in ('log', 'counter'):
            ret[keyword] = item
            if keyword == 'protocol':  # Single word only
                keyword = None

        # Non-start keywords
        elif item in ('accept', 'drop', 'return', 'continue',
                      'masquerade', 'notrack'):
            rule_item_only_one(ret, 'statement', item)
            ret['statement'] = item
            ret['plain'] = False
            keyword = None
        elif item == 'reject':
            rule_item_only_one(ret, 'statement', item)
            ret['statement'] = 'reject with icmpx admin-prohibited'
            ret['plain'] = False
            keyword = None
        elif item in ('multicast', 'broadcast'):
            rule_item_only_one(ret, 'cast', item)
            ret['cast'] = item
            ret['plain'] = False
            keyword = None
        elif item in ('tcp', 'udp', 'icmp', 'icmpv6', 'igmp', 'esp'):
            # "igmp" and "esp" are for backward compatibility (v0.21)
            rule_item_only_one(ret, 'protocol', item)
            ret['protocol'] = item
            ret['plain'] = False
            keyword = None
        elif item in ('ipv4', 'ipv6'):
            # Restrict rule to a single IP family
            ret[item] = True
            ret['plain'] = False
            keyword = None
        elif item in ('sipsec', 'dipsec'):
            ret[item] = 'exists'
            ret['plain'] = False
            keyword = None
        elif item in ('-sipsec', '-dipsec'):
            # Negated form: require that ipsec is NOT in use
            ret[item[1:]] = 'missing'
            ret['plain'] = False
            keyword = None
        elif item in ('conntrack', '-conntrack'):
            rule_item_only_one(ret, 'conntrack', item)
            ret['after_conntrack'] = item == 'conntrack'
            ret['plain'] = False
            keyword = None

        # Start keywords
        elif item in ('snat', 'dnat', 'snat_prefix', 'dnat_prefix'):
            rule_item_only_one(ret, 'statement', item)
            ret['statement'] = item
            ret['plain'] = False
            keyword = 'to'
            ret[keyword] = ret[keyword] or ''
        elif item in ('protocol',
                      'saddr', 'sport',
                      'daddr', 'dport',
                      'oifname', 'iifname',
                      'mac_saddr', 'mac_daddr',
                      'global_rate',
                      'saddr_rate', 'saddr_rate_mask', 'saddr_rate_name',
                      'daddr_rate', 'daddr_rate_mask', 'daddr_rate_name',
                      'saddr_daddr_rate', 'saddr_daddr_rate_mask',
                      'saddr_daddr_rate_name',
                      'uid', 'gid',
                      'szone', 'dzone',
                      'new_szone', 'new_dzone',
                      'counter',
                      'helper',
                      'log',
                      'log_level',
                      'nft',
                      'mss',
                      'template',
                      'queue',
                      'tproxy',
                      'mark_set',
                      'mark_match',
                      'priority_set',
                      'priority_match',
                      'dscp',
                      'cgroup',
                      'ct_status',
                      'time',
                      ):
            keyword = item
            ret[keyword] = ret[keyword] or ''
            if item == 'queue':  # statement and start keyword
                rule_item_only_one(ret, 'statement', item)
                ret['statement'] = item
            if item not in ('counter', 'log', 'log_level'):
                ret['plain'] = False

        # More parameters for keyword
        elif keyword:
            # Append extra words to the active keyword, space-separated
            if ret[keyword]:
                ret[keyword] += ' '
            ret[keyword] += item

        # Unknown word after non-start keyword
        else:
            fail(f'{fileline}Can\'t parse line: {" ".join(line)}')

    # Use no-op statement "continue" for plain "log" or "counter" rule,
    # everything else defaults to "accept". Also mark them as -conntrack
    # so that they really log/count everything.
    if ret['plain']:
        ret['statement'] = 'continue'
        ret['after_conntrack'] = False
    verify_rule_sanity(ret, fileline)
    return ret


def parse_config_rules(config):
    """Parse "zone-zone" rules from config.

    All other sections must be already parsed and removed from config.
    Returns {(szone, dzone): [rule dict, ...]}.
    """
    rules = {}
    for section, lines in config.items():
        # Skip internal "_" bookkeeping and already-handled sections
        if section.startswith('_') or section in ('resolve', 'iplist'):
            continue
        parts = section.split('-')
        if len(parts) != 2:  # Must be exactly "szone-dzone"
            fail(f'{config["_section_line"][section]}Unknown section: '
                 f'{section}')
        szone, dzone = parts
        rules[szone, dzone] = [parse_rule_line(line) for line in lines]
    return rules


def filter_any_zonelist(rule, srcdst):
    """Parse rule[szone] list and return pos/neg boolean + zonelist."""
    spec = rule[srcdst]
    if not spec:
        return False, []   # "not in empty list" == everything

    positive = True
    names = []
    for word in spec.split():
        if word.startswith('-'):
            # A "-" entry may not follow plain entries
            if positive and names:
                fail(f'{rule["fileline"]}Can\'t mix "+" and "-" items: '
                     f'{rule[srcdst]}')
            positive = False
            names.append(word[1:])
        elif not positive:
            # A plain entry may not follow "-" entries
            fail(f'{rule["fileline"]}Can\'t mix "+" and "-" items: '
                 f'{rule[srcdst]}')
        else:
            names.append(word)
    return positive, names


def insert_single_any(any_rules, rules, szone, dzone):
    """Insert single any_rules to rules[(szone, dzone)]."""
    if szone == dzone == CONFIG['localhost_zone']:
        return  # Never insert to localhost-localhost

    def passes(rule, key, zone):
        """True when zone matches the rule's +/- zone filter."""
        inside, zonelist = filter_any_zonelist(rule, key)
        return (zone in zonelist) == inside

    # Filter out e.g. "szone -public" when adding to zone pair "public-xxx"
    matching = [rule for rule in any_rules
                if passes(rule, 'szone', szone)
                and passes(rule, 'dzone', dzone)]

    # Matching rules go before the pair's own rules
    if matching:
        rules[(szone, dzone)] = matching + rules.get((szone, dzone), [])


def insert_any_zones(zones, rules):
    """Insert "any-zone", "zone-any" and "any-any" rules to "zone-zone" rules.

    These are inserted to beginning of zone-zone rules.
    """
    for zone in zones:
        # "any-zone": prepend to every szone-zone pair
        from_any = rules.pop(('any', zone), [])
        for szone in zones:
            insert_single_any(from_any, rules, szone, zone)

        # "zone-any": prepend to every zone-dzone pair
        to_any = rules.pop((zone, 'any'), [])
        for dzone in zones:
            insert_single_any(to_any, rules, zone, dzone)

    # "any-any": prepend to every zone pair
    catch_all = rules.pop(('any', 'any'), [])
    for szone, dzone in itertools.product(zones, zones):
        insert_single_any(catch_all, rules, szone, dzone)


def verify_config(config, zones, rules):
    """Verify config data.

    Adds the implicit localhost zone when missing, validates zone-zone
    section names and appends a final catch-all rule to every pair.
    """
    if not zones:
        fail('No zones defined in section zone{}')

    localhost = CONFIG['localhost_zone']
    if localhost not in zones:
        zones[localhost] = {'interface': []}
        warning(f'{config["_section_line"]["zone"]}Zone "{localhost}" '
                f'is missing from zone{{}}, adding it')

    # The localhost zone is matched implicitly, never by interface
    if zones[localhost]['interface']:
        fail(f'{config["_section_line"]["zone"]}Zone "{localhost}" has '
             f'interfaces "{" ".join(zones[localhost]["interface"])}", '
             f'it must be empty')

    if CONFIG['dbus_zone'] not in zones:
        warning(f'Config option dbus_zone value '
                f'"{CONFIG["dbus_zone"]}" is missing from zone{{}}')

    # All zone-zone pairs must be known
    for szone, dzone in rules:
        if szone not in zones or dzone not in zones:
            fileline = config['_section_line'][f'{szone}-{dzone}']
            fail(f'{fileline}Unknown zone-zone: {szone}-{dzone}')

    # Make sure all zone-zone pairs are defined. They are needed for
    # "ct established" return packets and for dynamic interface-to-zone
    # binding via D-Bus.
    # Add final rule to all zone-zone pairs, even if there already is
    # one. It will be optimized out later.
    for szone, dzone in itertools.product(zones, zones):
        if szone == dzone == localhost:
            final = ['accept']  # localhost-localhost is accept
        elif szone == localhost:
            final = ['reject', 'log']  # localhost-foo is reject
        else:
            final = ['drop', 'log']  # everything else is drop
        rules.setdefault((szone, dzone), []).append(
            parse_rule_line(('', final)))


def output_rate_names(rules):
    """Output empty saddr_rate sets to ruleset.

    Invents "_rate_set_N" names for anonymous rate sets and emits each
    named set only once, as an IPv4 and an IPv6 variant.
    """
    next_id = 1
    emitted = set()
    for rulelist in rules.values():
        for rule in rulelist:
            for rate in ('saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
                if not rule[rate]:
                    continue

                # Rule with rate found. It can be pre-named or anonymous.
                setname = rule[f'{rate}_name']
                if setname in emitted:
                    continue  # Pre-named and already emitted
                if not setname:  # Anonymous - invent a name for it
                    setname = f'_rate_set_{next_id}'
                    rule[f'{rate}_name'] = setname
                    next_id += 1
                emitted.add(setname)

                # Output empty sets for IPv4 and IPv6. Timeout-based sets
                # expire entries after one minute (one hour for hourly rates).
                for ipv in (4, 6):
                    addr_type = f'ipv{ipv}_addr'
                    out(f'set {setname}_{ipv} {{')
                    if rate == 'saddr_daddr_rate':
                        # Pair set: source and destination concatenated
                        out(f'type {addr_type} . {addr_type}')
                    else:
                        out(f'type {addr_type}')
                    out(f'size {CONFIG["set_size"]}')
                    if rule[rate].startswith('ct '):
                        out('flags dynamic')
                    else:
                        out('flags dynamic,timeout')
                        out(f'timeout '
                            f'{"1h" if "hour" in rule[rate] else "1m"}')
                    out('}')


def suffix_mask(value, compare):
    """Convert suffix mask "IP/-mask" to nft."""
    addr, _, bits = value.partition('/-')
    hexmask = f'{(1 << int(bits)) - 1:x}'  # Mask as long hex string
    # Split the hex mask into 16-bit ":xxxx" groups, right to left
    groups = ''
    while hexmask:
        groups = f':{hexmask[-4:]}{groups}'
        hexmask = hexmask[:-4]
    # Default comparison is equality
    return f'& :{groups} {compare or "== "}{addr}'


def single_or_set(data, fileline='', quote=False):
    """Convert data to single item or set if multiple values."""
    # Accept both a list and a whitespace-separated string
    values = data if isinstance(data, list) else data.split()

    # Negated list: every item must carry "-"; strip it, compare with "!="
    neg = ''
    for index, value in enumerate(values):
        if value.startswith('-'):
            if index and not neg:
                fail(f'{fileline}Can\'t mix "+" and "-" items: '
                     f'{" ".join(values)}')
            neg = '!= '
        elif neg:
            fail(f'{fileline}Can\'t mix "+" and "-" items: {" ".join(values)}')
        if neg:
            values[index] = value[1:]

    # Quote interface names and similar items. "inet" is a reserved word
    # but can also be an interface name. It must be quoted.
    if quote:
        values = [item if item.isnumeric() else f'"{item}"'
                  for item in values]

    # Single item
    if len(values) == 1 and ' ' not in values[0]:
        single = values[0]
        return suffix_mask(single, neg) if '/-' in single else neg + single

    # nft doesn't support "saddr { @foo, @bar }"
    if any(item.startswith('@') for item in values):
        fail(f'{fileline}Only single @list can be used: {" ".join(values)}')

    # Multiple values: sorted, deduplicated "{set}"
    return f'{neg}{{ {", ".join(sorted(set(values)))} }}'


def netmask_to_and(masklist, ipv, fileline):
    """Parse "masklist 24 56" rule and return "and 255.255.255.0 " string.

    First value is mask for IPv4 and second is for IPv6.
    """
    if not masklist:
        return ''
    masks = [int(word) for word in masklist.split() if word.isnumeric()]
    # Exactly two prefix lengths, within family limits
    if len(masks) != 2 or masks[0] > 32 or masks[1] > 128:
        fail(f'{fileline}Invalid rate_mask: {masklist}')

    if ipv == 4:
        network = ipaddress.IPv4Network(f'0.0.0.0/{masks[0]}')
    else:
        network = ipaddress.IPv6Network(f'::/{masks[1]}')
    return f'and {network.netmask} '


def limit_rate_or_ct(rate):
    """Return "limit rate x" or "ct count x" according to rate."""
    # "ct count" rates are passed through as-is, others get "limit rate"
    prefix = '' if rate.startswith('ct ') else 'limit rate '
    return f'{prefix}{rate} '


def rule_rate_limit(rule, ipv):
    """Return rule's rate limits as nft update-command."""
    if rule['global_rate']:
        return limit_rate_or_ct(rule['global_rate'])

    ip_field = 'ip' if ipv == 4 else 'ip6'
    ret = ''
    for rate in ('saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
        limit = rule[rate]
        if not limit:
            continue
        name = rule[f'{rate}_name']
        mask = rule[f'{rate}_mask']

        # "ct count" sets are plain dynamic sets ("add"), others are
        # timeout-based and refreshed with "update"
        part = 'add' if limit.startswith('ct ') else 'update'
        part += f' @{name}_{ipv} {{ '
        if 'saddr' in rate:
            part += f'{ip_field} saddr '
            part += netmask_to_and(mask, ipv, rule['fileline'])
        if rate == 'saddr_daddr_rate':
            part += '. '  # Concatenated source . destination key
        if 'daddr' in rate:
            part += f'{ip_field} daddr '
            part += netmask_to_and(mask, ipv, rule['fileline'])
        # "limit rate 3/second } "
        ret += f'{part}{limit_rate_or_ct(limit)}}} '
    return ret


def mark_set_argument(rule):
    """Convert "x" to "x", "x/y" to "mark and ~y or x"."""
    value = rule['mark_set']
    parts = value.split('/')
    if len(parts) == 1:  # Plain mark, no mask
        return value
    if len(parts) != 2:
        fail(f'{rule["fileline"]}Invalid "mark_set" value: {value}')

    # Convert "/0xff00" to "/0xffff00ff" so that same mask is used in
    # set and match operations in config. Nftables requires 0xffff00ff.
    try:
        mask = int(parts[1], 0)  # dec or hex
    except ValueError:
        fail(f'{rule["fileline"]}Invalid "mark_set" value: {value}')
    return f'meta mark & {hex(0xffffffff ^ mask)} | {parts[0]}'


def mark_match(rule):
    """Convert "x" to "== x", "x/y" to "and y == x".

    Negative "-x" can also be used to get "!= x".
    """
    value = rule['mark_match']
    if not value:
        return ''
    if value.startswith('-'):
        check, value = '!=', value[1:]
    else:
        check = '=='
    parts = value.split('/')
    if len(parts) > 2:
        fail(f'{rule["fileline"]}Invalid "mark_match" value: '
             f'{rule["mark_match"]}')
    if len(parts) == 2:  # Masked compare
        return f'meta mark & {parts[1]} {check} {parts[0]} '
    return f'meta mark {check} {value} '


def time_only_one(rule, oldvalue, newvalue):
    """Return newvalue if oldvalue is not set, else fail."""
    # Fail on duplicate (oldvalue already set) or empty newvalue
    if not (newvalue and not oldvalue):
        fail(f'{rule["fileline"]}Invalid "time" value: {rule["time"]}')
    return newvalue


def time_match_single(rule, compare, value):
    """Return single time compare+value as nft."""
    if not compare and not value:
        return ''
    time_only_one(rule, None, value)  # Check that value is set

    # Classify each word as weekday, clock time or calendar date
    wday = hour = date = None
    for word in value:
        if word.lower() in ('monday', 'tuesday', 'wednesday', 'thursday',
                            'friday', 'saturday', 'sunday'):
            wday = time_only_one(rule, wday, word.capitalize())
        elif (re.fullmatch(r'\d\d:\d\d(:\d\d)?', word) or  # hh:mm:ss, hh:mm
              re.fullmatch(r'\d\d:\d\d-\d\d:\d\d', word)):  # hh:mm-hh:mm
            hour = time_only_one(rule, hour, word)
        elif re.fullmatch(r'\d\d\d\d-\d\d-\d\d', word):  # yyyy-mm-dd
            date = time_only_one(rule, date, word)
        else:
            fail(f'{rule["fileline"]}Invalid "time" value: {rule["time"]}')

    # Date plus clock time become a single quoted nft timestamp
    if date and hour:
        date = f'{date} {hour}'
        hour = None

    parts = []
    if wday:
        parts.append(f'day {compare}"{wday}" ')
    if date:
        parts.append(f'time {compare}"{date}" ')
    if hour:
        # hh:mm-hh:mm ranges may not be quoted, single times must be
        if '-' in hour:
            parts.append(f'hour {compare}{hour} ')
        else:
            parts.append(f'hour {compare}"{hour}" ')
    return ''.join(parts)


def time_match(rule):
    """Convert "time Saturday" to nft, supporting time/day/hour."""
    spec = rule['time']
    if not spec:
        return ''
    ret = ''
    compare = ''
    pending = []
    for word in spec.split():
        if word in ('==', '!=', '<', '>', '<=', '>='):
            # Operator starts a new group; flush the previous one
            ret += time_match_single(rule, compare, pending)
            compare = '' if word == '==' else f'{word} '
            pending = []
        else:
            pending.append(word)
    return ret + time_match_single(rule, compare, pending)


def cgroup_match(rule):
    """Parse cgroup to nft."""
    cgroup = rule['cgroup']
    if not cgroup:
        return ''

    # cgroupv2 path: single value starting with a name character
    if ' ' not in cgroup and not re.match(r'[-0-9]', cgroup[0]):
        return f'socket cgroupv2 level {cgroup.count("/") + 1} "{cgroup}" '

    # Numeric cgroup id list
    return f'meta cgroup {single_or_set(cgroup, rule["fileline"])} '


def rule_statement(szone, dzone, rule, ipv, *, force_statement=None,
                   do_lograte=True):
    """Return rule's rate, log and statement as nft command.

    szone/dzone are used for log prefixes and counter selection, ipv
    selects the IPv4/IPv6 rate-limit sets.  force_statement replaces the
    statement derived from the rule itself.  do_lograte=False disables
    wrapping log output with the global "log_rate" limiter.
    """
    # pylint: disable=too-many-arguments

    # Map internal statement to nft statement
    if rule['mss']:
        # MSS clamping; "pmtu" clamps to the route's path MTU
        mss = 'rt mtu' if rule['mss'] == 'pmtu' else rule['mss']
        statement = f'tcp flags syn tcp option maxseg size set {mss}'
    else:
        # snat_prefix/dnat_prefix use the plain snat/dnat nft statement
        statement = force_statement or {
            'snat_prefix': 'snat',
            'dnat_prefix': 'dnat',
        }.get(rule['statement'], rule['statement'])

    # queue can have optional flags
    if statement == 'queue' and rule[statement]:
        statement = f'{statement} {rule[statement]}'

    # Rate, counter and log goes before statement
    prefix = rule_rate_limit(rule, ipv)

    # "counter" in single rule line adds counters to it.
    #
    # "foomuuri { counter xxx }" can be used to add counters to all rules:
    #   yes        - add to all rules in all zone-zone
    #   zone-zone  - add to all rules in single zone-zone
    #   zone-any   - add to all rules in all zone-*
    #   any-zone   - add to all rules in all *-zone
    # Multiple zone-pairs can be defined
    counterlist = CONFIG['counter'].split()
    if not rule['nft'] and (  # pylint: disable=too-many-boolean-expressions
            rule['counter'] is not None or  # "counter" in this rule
            'yes' in counterlist or  # global "yes"
            f'{szone}-{dzone}' in counterlist or  # matching zone-zone
            f'{szone}-any' in counterlist or
            f'any-{dzone}' in counterlist
    ):
        prefix += 'counter '
        if rule['counter']:
            # Non-empty value selects a named counter object
            prefix += f'name "{rule["counter"]}" '

    # "log" in rule will log all packets matching this rule. This is usually
    # used in final "drop log" rule. Default log prefix is
    # "zone-zone STATEMENT".
    if rule['log'] is not None or rule['log_level'] is not None:
        log_prefix = CONFIG['log_prefix']
        if rule['log']:
            # "log + foo" appends to default prefix, "log foo" replaces it
            if rule['log'].startswith('+ '):
                log_prefix += rule['log'][2:]
            else:
                log_prefix = rule['log']
        log_prefix = log_prefix.replace('$(szone)', szone)
        log_prefix = log_prefix.replace('$(dzone)', dzone)
        log_prefix = log_prefix.replace('$(statement)',
                                        statement.split()[0].upper())
        log_level = (CONFIG['log_level'] if rule['log_level'] is None else
                     rule['log_level'])
        log_nft = f'log prefix "{log_prefix} " {log_level}'
        if do_lograte and CONFIG['log_rate']:
            # Limit maximum amount of logging to "foomuuri { log_rate }".
            # This is important to avoid overlogging (DoS or filesystem full).
            rate = (f'update @_lograte_set_{ipv} '
                    f'{{ {"ip" if ipv == 4 else "ip6"} saddr '
                    f'limit rate {CONFIG["log_rate"]} }} ')
            if statement == 'continue':
                # "continue" can carry the rate-limited log inline
                return f'{prefix}{rate}{log_nft}'
            # NOTE(review): LOGRATES entries are presumably emitted later as
            # separate chains doing the rate-limited log + statement - the
            # emitting code is outside this block, confirm before relying.
            logname = f'lograte_{len(LOGRATES) + 1}'
            LOGRATES[logname] = (f'{rate}{log_nft}', statement)
            return f'{prefix}jump {logname}'
        prefix += f'{log_nft} '

    return prefix + statement


def output_icmp(szone, dzone, rules, ipv):
    """Find and parse icmp and icmpv6 rules.

    These must be handled before ct as "ct established" would accept
    ping floods. Default is to drop pings.

    Emits nft rules via out() and ends with a jump to the shared
    allow_icmp chain for this IP family.
    """
    has_ping_rule = False  # saw a rule matching echo-request
    has_match_all = False  # saw a terminal rule covering all pings
    if ipv == 4:
        icmp = 'icmp'
        ping = '8'  # ICMP type 8 = echo-request
    else:
        icmp = 'icmpv6'
        ping = '128'  # ICMPv6 type 128 = echo-request
    for rule in rules:
        if rule['protocol'] != icmp:
            continue

        match = rule_matchers(rule, ipv, skip_icmp=False)
        if match is None:
            continue  # rule doesn't apply to this IP family
        statement = rule_statement(szone, dzone, rule, ipv)

        proto_ports = parse_protocol_ports(rule, ipv, skip_icmp=False)
        out(f'{proto_ports}{match}{statement}')
        if (
                rule['dport'] and
                ping not in rule['dport'].split() and
                'echo-request' not in rule['dport'].split()
        ):
            continue  # Continue to next rule if this wasn't ping or match all

        # This rule was for ping, usually accepting non-flood pings. Add
        # explicit rule to drop overflow and all other pings.
        has_ping_rule = True
        if (match + statement).startswith(
                ('accept', 'drop', 'reject', 'jump', 'queue')):
            # No extra matchers before a terminal statement: covers all pings
            has_match_all = True
        elif has_match_all:  # Specific ping rule after match-all rule
            warning(f'{rule["fileline"]}Unreachable ping rule')

    # Overflow-pings must be dropped before ct
    if has_ping_rule and not has_match_all:
        out(f'{icmp} type echo-request drop')

    # Allow needed icmp
    out(f'jump allow_icmp_{ipv}')


def parse_iplist(rule, direction, ipv):
    """Parse IP address list in rule[direction] to nft rule.

    Raises ValueError when the list has no entries for this IP family.
    """
    iplist = rule[direction]
    if not iplist:
        return ''

    matching = []
    for item in iplist.split():
        if item.startswith(('@', '-@')):
            matching.append(f'{item}_{ipv}')  # "@foo" to "@foo_4"
            continue
        family = is_ip_address(item)
        if family == ipv:  # Address of this family - keep it
            matching.append(item)
        elif not family:  # Invalid IP address
            fail(f'{rule["fileline"]}Invalid IP address "{item}" in: '
                 f'{iplist}')

    # No matching addresses for this ipv family
    if not matching:
        raise ValueError

    # Return "ip saddr 10.2.3.4 " string
    return (f'{"ip" if ipv == 4 else "ip6"} {direction} '
            f'{single_or_set(matching, rule["fileline"])} ')


def parse_maclist(rule, direction):
    """Parse MAC address list in rule[direction] to nft rule."""
    maclist = rule[direction]
    if not maclist:
        return ''

    macs = []
    for word in maclist.split():
        word = word.lower()
        # Optional "-" negation followed by six hex pairs
        if not re.match(r'^(-)?([0-9a-f]{2}:){5}[0-9a-f]{2}$', word):
            fail(f'{rule["fileline"]}Invalid MAC address "{word}" in: '
                 f'{maclist}')
        macs.append(word)

    # "mac_saddr"/"mac_daddr" become nft "ether saddr"/"ether daddr"
    return f'ether {direction[4:]} {single_or_set(macs, rule["fileline"])} '


def parse_interface_names(rule):
    """Parse iifname/oifname to nft rule."""
    parts = []
    for key in ('iifname', 'oifname'):
        if rule[key]:
            # Interface names are quoted to survive nft reserved words
            quoted = single_or_set(rule[key], rule['fileline'], quote=True)
            parts.append(f'{key} {quoted} ')
    return ''.join(parts)


def parse_protocol_ports(rule, ipv, skip_icmp=True):
    """Parse tcp/udp sport/dport to nft rule.

    This can also handle rules like:
    - "tcp" without dport to nft "protocol tcp"
    - "protocol esp" to nft "protocol esp"
    - "protocol esp 123" to nft "esp spi 123"
    - "protocol vlan 123" to nft "vlan id 123"
    """
    protocol = rule['protocol']
    if not protocol or (skip_icmp and protocol in ('icmp', 'icmpv6')):
        # No protocol ("drop log", "dnat"); icmp handled in output_icmp()
        return ''
    if protocol == 'igmp' and ipv == 6:
        return None  # IPv6 uses Multicast Listener Discovery ICMP

    # Protocols whose "dport" maps to another nft field
    dport_keys = {
        'ip': 'protocol',
        'ip6': 'nexthdr',
        'ah': 'spi',
        'esp': 'spi',
        'comp': 'nexthdr',
        'icmp': 'type',
        'icmpv6': 'type',
        'dst': 'nexthdr',
        'frag': 'nexthdr',
        'hbh': 'nexthdr',
        'mh': 'nexthdr',
        'rt': 'nexthdr',
        'vlan': 'id',
        'arp': 'htype',
    }
    ports = ''
    for key in ('sport', 'dport'):
        if not rule[key]:
            continue
        protokey = dport_keys.get(protocol, key) if key == 'dport' else key
        ports += (f'{protocol} {protokey} '
                  f'{single_or_set(rule[key], rule["fileline"])} ')
    if ports:
        return ports
    # Bare protocol without ports
    return f'{"ip protocol" if ipv == 4 else "ip6 nexthdr"} {protocol} '


def parse_iplist_to_single_ip(rule, key, value, ipv):
    """Parse rule's iplist to single ipaddress, or None."""
    found = []
    for word in value.split():
        # Detect "1.2.3.4:80" and "[fe80::1]:80" address-with-port forms
        if word.count(':') == 1 and is_ip_address(word.split(':')[0]) == 4:
            family = 4
        elif (word.startswith('[') and ']:' in word and
              is_ip_address(word[1:].split(']:')[0]) == 6):
            family = 6
        else:
            family = is_ip_address(word)
        if not family:
            fail(f'{rule["fileline"]}Invalid IP address in '
                 f'"{key}" target: {word}')
        if family == ipv:
            found.append(word)

    if not found:  # Nothing found for this ipv, don't generate rule
        return None
    if len(found) > 1:
        fail(f'{rule["fileline"]}Multiple "{key}" targets: {" ".join(found)}')
    return found[0]


def parse_to(rule, ipv):
    """Parse snat/dnat "to" rule to nft rule.

    Returns None when "to" has no target for this IP family.
    """
    target_spec = rule['to']
    if not target_spec:
        return ''
    statement = rule['statement']
    if statement == 'queue':  # "to 3", "to 1-3", "to numgen", ...
        return f' to {target_spec}'

    # "to" may list both IPv4 and IPv6 targets, pick the one for this family
    target = parse_iplist_to_single_ip(rule, statement, target_spec, ipv)
    if not target:
        return None

    keyword = ('prefix to' if statement in ('snat_prefix', 'dnat_prefix')
               else 'to')
    return f' {"ip" if ipv == 4 else "ip6"} {keyword} {target}'


def rule_matchers(rule, ipv, *, cast=None, skip_options=True, skip_icmp=True):
    """Parse rule's matchers to nft rule.

    Return value "None" is "no match, skip rule".

    cast: emit rule only if its pkttype matches this (None = any pkttype)
    skip_options: skip "mss" rules, they are emitted by output_options()
    skip_icmp: skip icmp/icmpv6 rules, they are emitted by output_icmp()
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-return-statements
    if cast is not None and rule['cast'] != cast:
        return None  # multi/broadcast doesn't match
    if ipv == 6 and rule['cast'] == 'broadcast':
        return None  # broadcast is ipv4 only
    if skip_icmp and rule['protocol'] in ('icmp', 'icmpv6'):
        return None
    if skip_options and rule['mss']:  # Handled in output_options()
        return None

    # IPv4/6 specific rule?
    if ipv == 4 and rule['ipv6'] and not rule['ipv4']:
        return None
    if ipv == 6 and rule['ipv4'] and not rule['ipv6']:
        return None

    # Convert matchers to nft
    castmeta = ''
    if cast and rule['cast'] != 'unicast':
        castmeta = f'meta pkttype {rule["cast"]} '

    # IPsec policy: "meta ipsec" for source side, "rt ipsec" for dest side
    ipsecmeta = ''
    if rule['sipsec']:
        ipsecmeta += f'meta ipsec {rule["sipsec"]} '
    if rule['dipsec']:
        ipsecmeta += f'rt ipsec {rule["dipsec"]} '

    ct_status = ''
    if rule['ct_status']:
        ct_status = f'ct status {rule["ct_status"]} '

    priority_match = ''
    if rule['priority_match']:
        priority_match = f'meta priority "{rule["priority_match"]}" '

    # Socket owner uid/gid, names are quoted for nft
    uid = ''
    if rule['uid']:
        uid = (f'meta skuid '
               f'{single_or_set(rule["uid"], rule["fileline"], quote=True)} ')
    gid = ''
    if rule['gid']:
        gid = (f'meta skgid '
               f'{single_or_set(rule["gid"], rule["fileline"], quote=True)} ')

    # Setters: packet mark is mirrored to conntrack mark, queue priority
    meta_set = ''
    if rule['mark_set']:
        meta_set += (f'meta mark set {mark_set_argument(rule)} '
                     f'ct mark set meta mark ')
    if rule['priority_set']:
        meta_set += f'meta priority set "{rule["priority_set"]}" '

    # Transparent proxy target must be a single address of this family
    tproxy = ''
    if rule['tproxy']:
        tproxy_to = parse_iplist_to_single_ip(rule, 'tproxy', rule['tproxy'],
                                              ipv)
        if not tproxy_to:
            return None
        tproxy = f'tproxy {"ip" if ipv == 4 else "ip6"} to {tproxy_to} '

    dscp = ''
    if rule['dscp']:
        dscp = (f'{"ip" if ipv == 4 else "ip6"} dscp '
                f'{single_or_set(rule["dscp"], rule["fileline"])} ')

    ifname = parse_interface_names(rule)

    # Source/destination IP and MAC lists; ValueError from the parsers
    # skips the whole rule for this address family
    addrlist = ''
    try:
        addrlist += parse_iplist(rule, 'saddr', ipv)
        addrlist += parse_iplist(rule, 'daddr', ipv)
        addrlist += parse_maclist(rule, 'mac_saddr')
        addrlist += parse_maclist(rule, 'mac_daddr')
    except ValueError:
        return None

    proto_ports = parse_protocol_ports(rule, ipv)
    if proto_ports is None:
        return None

    # Return matcher string
    return (f'{ipsecmeta}{ct_status}{castmeta}{ifname}{addrlist}{proto_ports}'
            f'{cgroup_match(rule)}{time_match(rule)}{mark_match(rule)}'
            f'{priority_match}{dscp}{uid}{gid}{tproxy}{meta_set}')


def output_cast(cast, szone, dzone, rules, ipv, *, after_conntrack=True):
    """Output all uni/multi/broadcast rules for single zone-zone.

    cast: pkttype ("unicast"/"multicast"/"broadcast") or None for any
    after_conntrack: select rules placed before/after the ct state vmap
    """
    # pylint: disable=too-many-arguments
    has_mark_restore = False
    for rule in rules:
        if rule['after_conntrack'] != after_conntrack:
            continue
        match = rule_matchers(rule, ipv, cast=cast)
        if match is None:
            continue
        if match == '' and cast is None and rule['cast'] != 'unicast':
            continue  # Don't convert "multicast accept" to nft "accept"
        statement = rule_statement(szone, dzone, rule, ipv,
                                   force_statement=rule['nft'])

        # Automatically restore mark from conntrack before mark match or set
        # is used. Mark set needs it too for OR-operations.
        if not has_mark_restore and (rule['mark_match'] or rule['mark_set']):
            has_mark_restore = True
            out('meta mark set ct mark')

        out(f'{match}{statement}')

        # Does this rule need kernel helper?
        if rule['helper']:
            # Helper name must be "<kernel>-<suffix>" with exactly one "-"
            if rule['helper'].count('-') != 1:
                fail(f'{rule["fileline"]}Invalid helper name: '
                     f'{rule["helper"]}')
            HELPERS.append((rule['helper'], rule['protocol'], rule['dport']))
            # Kernel name: part before "-", with "_" mapped back to "-"
            kernelname = rule['helper'].split('-')[0].replace('_', '-')
            out(f'ct helper \"{kernelname}\" {rule["statement"]}')


def output_zonemap(zonemap, szone, dzone, ipv):
    """Output zonemap{} rules for this szone-dzone."""
    for rule in zonemap:
        # Apply only rules whose zone filters match this pair; an empty
        # filter matches every zone.
        if rule['szone'] and szone not in rule['szone'].split():
            continue
        if rule['dzone'] and dzone not in rule['dzone'].split():
            continue
        target_szone = rule['new_szone'] or szone
        target_dzone = rule['new_dzone'] or dzone
        # A jump back to this same chain would be pointless, skip it
        if (target_szone, target_dzone) == (szone, dzone):
            continue

        match = rule_matchers(rule, ipv)
        if match is not None:
            out(f'{match}jump {target_szone}-{target_dzone}_{ipv}')


def output_options(szone, dzone, rules, ipv):
    """Output "mss" etc options as first rules."""
    # Only "mss" rules are handled here; rule_matchers() skips them in
    # the normal rule pass (skip_options=True).
    for rule in (item for item in rules if item['mss']):
        match = rule_matchers(rule, ipv, skip_options=False)
        if match is None:
            continue
        statement = rule_statement(szone, dzone, rule, ipv,
                                   do_lograte=False)
        out(f'{match}{statement}')


def output_zone(zonemap, szone, dzone, rules, ipv):
    """Output single zone-zone_ipv4 nft chain."""
    # Header + zonemap jumps + options
    out(f'chain {szone}-{dzone}_{ipv} {{')
    output_zonemap(zonemap, szone, dzone, ipv)
    output_options(szone, dzone, rules, ipv)

    # Rules with "-conntrack" or plain "log"/"counter" rule. Output these
    # before conntrack and icmp so that they see all traffic.
    output_cast(None, szone, dzone, rules, ipv, after_conntrack=False)

    # ICMP is special, keep it before ct
    output_icmp(szone, dzone, rules, ipv)

    # Connection tracking
    out('ct state vmap {')
    out('established : accept,')
    out('related : accept,')
    out('invalid : jump invalid_drop,')
    out(f'new : jump smurfs_{ipv},')
    out(f'untracked : jump smurfs_{ipv}')
    out('}')

    # Allow outgoing IGMP multicast membership reports and incoming IGMP
    # multicast query.
    output_chain = szone == CONFIG['localhost_zone']
    input_chain = dzone == CONFIG['localhost_zone']
    if ipv == 4 and output_chain and not input_chain:
        out('ip protocol igmp ip daddr 224.0.0.22 accept')  # membership report
    if ipv == 4 and input_chain and not output_chain:
        out('ip protocol igmp ip daddr 224.0.0.1 accept')   # query

    # Broadcast and multicast.
    #
    # "meta pkttype" works only for incoming packets so skip these if
    # szone=localhost (outgoing).
    #
    # "meta pkttype" works also for forwarding packets so write those rules
    # too (see below for zonemap reason). Multicast can't really be forwarded
    # as it is local to ip/netmask. Proxying is ok, where software listens one
    # interface and writes it to another.
    if not output_chain:
        output_cast('multicast', szone, dzone, rules, ipv)
        output_cast('broadcast', szone, dzone, rules, ipv)
        if ipv == 4:
            out('meta pkttype { broadcast, multicast } drop')
        else:
            out('meta pkttype multicast drop')

    # Unicast.
    #
    # Broadcast/multicast is already handled above for incoming packets so
    # output only unicast here for incoming.
    #
    # For forward/output add multicast rules without multicast matcher. This
    # means that for forward packets both with and without "meta pkttype"
    # rules will be outputted. This is needed for complex zonemap mangling
    # where szone=myservice might actually be from localhost.
    output_cast('unicast' if input_chain else None, szone, dzone, rules, ipv)
    out('}')


def output_zone_vmaps(zones, rules):
    """Output interface verdict maps to jump to correct zone-zone."""
    # pylint: disable=too-many-branches

    # Vmap must have interval-flag if there is wildcard-interface.
    localhost = CONFIG['localhost_zone']
    has_wildcard = False
    for value in zones.values():
        for interface in value['interface']:
            if '*' in interface:
                has_wildcard = True

    # Incoming zones: iifname -> jump zone-localhost
    out('map input_zones {')
    out('type ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    out('"lo" : accept,')
    for zone, value in zones.items():
        for interface in value['interface']:
            out(f'"{interface}" : jump {zone}-{localhost},')
    out('}')
    out('}')

    # Outgoing zones: oifname -> jump localhost-zone
    out('map output_zones {')
    out('type ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    # NOTE(review): "> 1" implies the lo-lo rule list always carries one
    # implicit entry - confirm against the rule parser.
    if len(rules[(localhost, localhost)]) > 1:  # Jump to lo-lo if it has rules
        out(f'"lo" : jump {localhost}-{localhost},')
    else:
        out('"lo" : accept,')
    for zone, value in zones.items():
        for interface in value['interface']:
            out(f'"{interface}" : jump {localhost}-{zone},')
    out('}')
    out('}')

    # Forwarding zones: iifname . oifname -> jump szone-dzone, all pairs
    out('map forward_zones {')
    out('type ifname . ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    out('"lo" . "lo" : accept,')
    for szone, svalue in zones.items():
        for dzone, dvalue in zones.items():
            for sinterface in svalue['interface']:
                for dinterface in dvalue['interface']:
                    out(f'"{sinterface}" . "{dinterface}" : '
                        f'jump {szone}-{dzone},')
    out('}')
    out('}')


def output_zone2zone_rules(rules, zonemap):
    """Output all zone-zone rules for both IPv4 and IPv6."""
    for szone, dzone in rules:
        # Dispatcher chain splitting traffic to zone-zone_4 / zone-zone_6
        out(f'chain {szone}-{dzone} {{')
        out('meta nfproto vmap {')
        out(f'ipv4 : jump {szone}-{dzone}_4,')
        out(f'ipv6 : jump {szone}-{dzone}_6')
        out('}')
        out('}')

        # Per-family chains with the actual rules
        for ipv in (4, 6):
            output_zone(zonemap, szone, dzone, rules[(szone, dzone)], ipv)


def output_rule_section_no_header(rules, section):
    """Output snat, dnat, prerouting, postrouting, etc. rules inside chain."""
    if not rules:
        return

    has_mark_restore = False
    ip_merger = set()  # Already emitted rules, merges IPv4/IPv6 duplicates
    for rule in rules:
        for ipv in (4, 6):
            # None from parse_to/rule_matchers = "not for this family"
            to_rule = parse_to(rule, ipv)
            if to_rule is None:
                continue
            match = rule_matchers(rule, ipv, skip_options=False)
            if match is None:
                continue
            statement = rule_statement(section.upper(), '', rule, ipv,
                                       force_statement=rule['nft'],
                                       do_lograte=False)

            # Automatically restore mark from conntrack when needed
            if (
                    not has_mark_restore and
                    (rule['mark_match'] or rule['mark_set'])
            ):
                has_mark_restore = True
                out('meta mark set ct mark')

            # There are no separate IPv4/IPv6 chains so merge possible rules
            full_rule = f'{match}{statement}{to_rule}'
            if full_rule in ip_merger:
                continue
            ip_merger.add(full_rule)
            out(full_rule)


def output_special_chains(chain_rules):
    """Output snat, dnat, prerouting postrouting, etc. chain + rules."""
    # Fixed output order; "output" must come last, see mark note below
    for prefix in ('snat', 'dnat',
                   'prerouting', 'postrouting',
                   'forward', 'input', 'output'):
        for (section, ftype, priority), rules in chain_rules.items():
            if prefix != section:
                continue

            # Output-chain must restore mark from conntrack if mark_set was
            # used for locally generated packets (usually in multiple ISP's
            # prerouting).
            #
            # Simply restore mark if mark_set was used in any chain. For that
            # reason output-chain should be outputted last.
            need_mark_restore = section == 'output' and any(
                'ct mark set' in line for line in OUT)

            if not rules and not need_mark_restore:
                continue

            # snat/dnat are convenience names hooking postrouting/prerouting
            hook_chain = {
                'snat': 'postrouting',
                'dnat': 'prerouting',
            }.get(section, section)
            hook_nft = f'type {ftype} hook {hook_chain} priority {priority}'
            # Build chain name from type+hook+priority, stripping the
            # priority_offset text and characters invalid in chain names.
            # NOTE(review): assumes the priority_offset text never overlaps
            # other parts of the name - confirm.
            chain_name = f'{ftype}_{hook_chain}_{priority}'
            chain_name = chain_name.replace(CONFIG['priority_offset'], '')
            chain_name = chain_name.replace('+', '')
            chain_name = chain_name.replace(' ', '_')
            chain_name = chain_name.replace('__', '_')
            out(f'chain {chain_name} {{')
            out(hook_nft)
            if need_mark_restore:
                out('meta mark set ct mark')
            output_rule_section_no_header(rules, section)
            out('}')


def output_static_chain_logging(chain, statement):
    """Output logging rules for input/output/forward/invalid/smurfs chains."""
    lograte = CONFIG[f'log_{chain}']  # Enabled in foomuuri{}
    setting = lograte.lower()
    if setting == 'no':
        return
    if setting == 'yes':  # "yes" means use standard log rate
        lograte = CONFIG['log_rate']
    # "invalid" chain logs ip header options too
    flags = ((' flags ip options' if chain == 'invalid' else '') +
             f' {CONFIG["log_level"]}')
    prefix = f'{chain.upper()} {statement.upper()}'

    if not lograte:  # Empty rate means log everything, unlimited
        out(f'log prefix "{prefix} "{flags}')
        return

    # Per-source-address rate limit via dynamic set, one per family
    for ipv, saddr in ((4, 'ip saddr'), (6, 'ip6 saddr')):
        out(f'update @_lograte_set_{ipv} {{ {saddr} limit rate {lograte} }} '
            f'log prefix "{prefix} "{flags}')


def output_header(chain_rules):
    """Output generic nft header."""
    # Delete current foomuuri table and add new. Declaring the table first
    # lets the delete succeed even when the table doesn't exist yet.
    out('table inet foomuuri')
    out('delete table inet foomuuri')
    out('')
    out('table inet foomuuri {')
    out('')

    # Insert include files: distribution defaults first, then local ones
    for filename in (find_config_files(CONFIG['_share_dir'], '*.nft') +
                     find_config_files(CONFIG['_etc_dir'], '*.nft')):
        try:
            lines = filename.read_text('utf-8').splitlines()
        except PermissionError as error:
            fail(f'File (unknown): Can\'t read: {error}')
        for line in lines:
            line = line.strip()
            if line:
                out(line)

    # Logging chains: log (rate-limited) and drop the packet
    for chain in ('invalid', 'smurfs', 'rpfilter'):
        out(f'chain {chain}_drop {{')
        output_rule_section_no_header(chain_rules.get((chain, None, None), []),
                                      chain)
        if chain == 'rpfilter':
            # Let DHCP server-to-client traffic (udp 67->68) pass rpfilter
            out('udp sport 67 udp dport 68 return')
        output_static_chain_logging(chain, 'drop')
        out('drop')
        out('}')

    # input/output/forward jump chains
    out('chain input {')
    out(f'type filter hook input priority filter{CONFIG["priority_offset"]}')
    out('iifname vmap @input_zones')
    output_static_chain_logging('input', 'drop')
    out('drop')
    out('}')

    out('chain output {')
    out(f'type filter hook output priority filter{CONFIG["priority_offset"]}')
    out('oifname vmap @output_zones')
    # IGMP membership report and IPv6 equivalent must be allowed here too as
    # D-Bus interface change event might not be processed yet.
    out('ip protocol igmp ip daddr 224.0.0.22 accept')
    out('ip6 saddr :: icmpv6 type mld2-listener-report accept')
    output_static_chain_logging('output', 'reject')
    out('reject with icmpx admin-prohibited')
    out('}')

    out('chain forward {')
    out(f'type filter hook forward priority filter{CONFIG["priority_offset"]}')
    out('iifname . oifname vmap @forward_zones')
    output_static_chain_logging('forward', 'drop')
    out('drop')
    out('}')


# Rule prefixes that optimize_accepts()/merge_accepts() can combine into a
# single set-based accept; merged rules are emitted in this order.
MERGES = [  # Preferred order for rules
    'meta pkttype multicast udp dport',
    'meta pkttype broadcast udp dport',
    'udp dport',
    'udp sport',
    'tcp dport',
    'tcp sport',
    'ct helper',
]


def merge_accepts(accepts, linenum):
    """Sort and merge found accept rules.

    accepts: accept lines removed from OUT[] by optimize_accepts()
    linenum: index in OUT[] where merged lines are inserted back
    Return the number of lines inserted into OUT[].
    """
    # Buckets are created in reversed MERGES order: every merged line below
    # is inserted at the same index, so the last-inserted (first MERGES
    # entry) ends up first in the final output.
    merge = {key: [] for key in MERGES[::-1]}
    ret = 0
    for accept in accepts:
        for key, ports in merge.items():
            # Matches "tcp dport 22 accept" and "tcp dport { 22, 80 } accept"
            regex = f'^{key} (\\{{ )?([-\\d, ]+)( \\}})? accept$'
            match = re.match(regex, accept)
            if match:  # Add "22" from "tcp dport 22 accept" to merged
                ports.append(match.group(2))
                break
        else:  # Can't merge, output as is
            OUT.insert(linenum + ret, accept)
            ret += 1

    # Output merged
    for key, ports in merge.items():
        if ports:
            OUT.insert(linenum, f'{key} {single_or_set(ports)} accept')
            ret += 1
    return ret


def optimize_accepts():
    """Optimize ruleset accepts.

    This will change multiple accepts to single accept using set.
    """
    accepts = []  # Run of consecutive mergeable accepts collected so far
    linenum = 0
    while linenum < len(OUT):
        line = OUT[linenum]
        if line == 'continue':  # No-op line generated by plain "counter"
            del OUT[linenum]
        elif (
                line.endswith(' accept') and
                line.startswith(tuple(MERGES) + ('ip ', 'ip6 '))
        ):
            # Pull mergeable accept out of OUT[]; it is re-inserted in
            # merged form once a non-mergeable line ends the run
            accepts.append(line)
            del OUT[linenum]
        else:
            # Flush collected accepts before this line; skip over the
            # lines merge_accepts() inserted plus this line itself
            linenum += merge_accepts(accepts, linenum) + 1
            accepts = []


def optimize_jumps():
    """Optimize lograte jumps in ruleset.

    This will change zone-zone's final "drop log" rule to optimized version.
    """
    index = 0
    while index < len(OUT):
        if OUT[index].startswith('jump lograte_'):
            # Inline the log + statement pair in place of the jump.
            # Popped entries are gone from LOGRATES, so output_logrates()
            # later emits only the ones left over.
            logname = OUT[index].split()[1]
            log_nft, statement = LOGRATES.pop(logname)
            OUT[index] = log_nft
            OUT.insert(index + 1, statement)
        index += 1


def optimize_final_rules():
    """Remove unreachable rules from chain after "drop" without matcher.

    A line beginning with a bare terminal statement ("accept", "drop",
    "reject ...", "queue ...") ends its chain: nothing between it and the
    closing "}" can ever match, so those lines are deleted.
    """
    linenum = 0
    while linenum < len(OUT):
        if OUT[linenum].startswith(('accept', 'drop', 'reject', 'queue')):
            # Bounds guard: don't run past the end if the terminal
            # statement happens to be the very last line.
            while linenum + 1 < len(OUT) and OUT[linenum + 1] != '}':
                del OUT[linenum + 1]
        linenum += 1


def output_logrates():
    """Output non-optimized lograte entries as chains."""
    # Entries still in LOGRATES were not inlined by optimize_jumps()
    for logname, (log_nft, statement) in LOGRATES.items():
        for line in (f'chain {logname} {{', log_nft, statement, '}'):
            out(line)

    # Output empty lograte sets used by "foomuuri { log_rate }".
    for ipv in (4, 6):
        for line in (f'set _lograte_set_{ipv} {{',
                     f'type ipv{ipv}_addr',
                     f'size {CONFIG["set_size"]}',
                     'flags dynamic,timeout',
                     'timeout 1m',
                     '}'):
            out(line)


def output_iplist_sets(iplist):
    """Output empty iplist{} sets."""
    # One IPv4 and one IPv6 set per list name; name[1:] drops the leading
    # marker character of the list name
    for name, ipv in itertools.product(sorted(iplist), (4, 6)):
        for line in (f'set {name[1:]}_{ipv} {{',
                     f'type ipv{ipv}_addr',
                     'flags interval,timeout',
                     'auto-merge',
                     '}'):
            out(line)


def output_named_counters(rules, chain_rules):
    """Output named counters."""
    # Collect unique counter names from every rule list
    names = {rule['counter']
             for rulelist in itertools.chain(rules.values(),
                                             chain_rules.values())
             for rule in rulelist
             if rule['counter']}

    # Output counters, sorted for stable ruleset output
    for name in sorted(names):
        out(f'counter {name} {{')
        out('}')


def output_helpers():
    """Output helpers.

    HELPERS is filled by output_cast() with (name, protocol, ports)
    tuples for every rule that requested a kernel conntrack helper.
    """
    # Convert helper list to helper->proto->set(ports) dict
    helpers = {}
    for name, proto, ports in HELPERS:
        helpers.setdefault(name, {}).setdefault(proto, set()).update(
            ports.split())
    if not helpers:
        return

    # Output "ct helper" objects. Kernel helper name is the part before
    # "-", with "_" mapped back to "-".
    for name, protos in helpers.items():
        kernelname = name.split('-')[0].replace('_', '-')
        out(f'ct helper {name} {{')
        for proto in protos:
            out(f'type "{kernelname}" protocol {proto}')
        out('}')

    # Output prerouting chain assigning helpers to matching traffic
    out('chain helper {')
    out(f'type filter hook prerouting priority filter'
        f'{CONFIG["priority_offset"]}')
    for name, protos in helpers.items():
        for proto, ports in protos.items():
            out(f'{proto} dport {single_or_set(" ".join(ports))} '
                f'ct helper set "{name}"')
    out('}')


def output_rpfilter():
    """Prerouting chain to check rpfilter."""
    setting = CONFIG['rpfilter']
    if setting == 'no':  # Disabled completely
        return
    out('chain rpfilter {')
    out(f'type filter hook prerouting priority filter'
        f'{CONFIG["priority_offset"]}')
    # "yes" checks every interface, anything else is an interface list
    interfaces = ''
    if setting != 'yes':
        interfaces = f'iifname {single_or_set(setting, quote=True)} '
    out(f'{interfaces}fib saddr . mark . iif oif 0 meta ipsec missing '
        f'jump rpfilter_drop')
    out('}')


def output_footer():
    """Output generic ruleset footer.

    Closes the "table inet foomuuri {" block opened in output_header().
    """
    out('}')


def save_file(filename, lines):
    """Write lines to file.

    A dict is saved as pretty-printed JSON, any other iterable as plain
    newline-terminated text. File permissions are restricted to 0o600.
    """
    try:
        filename.unlink(missing_ok=True)
        if isinstance(lines, dict):
            text = json.dumps(lines, indent=2, sort_keys=True) + '\n'
        else:
            text = '\n'.join(lines) + '\n'
        filename.write_text(text, 'utf-8')
        filename.chmod(0o600)
    except PermissionError as error:
        fail(f'File (unknown): Can\'t write: {error}')
    except FileNotFoundError:  # Simultaneous saves and chmod gives error
        pass


def env_cleanup(text):
    """Allow only letters and numbers in text for environment variable."""
    # Decompose accented letters first (a-umlaut -> "a" + combining mark)
    # because isalpha() is true for them; ASCII-encode drops the marks.
    ascii_text = (unicodedata.normalize('NFKD', text)
                  .encode('ASCII', 'ignore').decode('utf-8'))

    # Replace everything that is not a letter or digit with "_"
    return ''.join(char if char.isalnum() else '_' for char in ascii_text)


def save_final(filename):
    """Save final ruleset to file."""
    # Convert OUT[] to tab-indented lines: a line starting with "}" closes
    # a level before it is written, a line ending with "{" opens one after.
    depth = 0
    lines = []
    for line in OUT:
        if line.startswith('}'):
            depth -= 1
        if line:
            line = '\t' * depth + line
        lines.append(line)
        if line == '\t}':  # Blank separator after each top-level block
            lines.append('')
        if line.endswith('{'):
            depth += 1

    # Save to "next" file
    save_file(filename, lines)


def signal_childs():
    """Signal foomuuri-dbus and foomuuri-monitor to reload."""
    for child in ('dbus', 'monitor'):
        # Read child's pid file; missing or garbage file = not running
        pidfile = CONFIG['_run_dir'] / f'foomuuri-{child}.pid'
        try:
            pid = int(pidfile.read_text(encoding='utf-8'))
        except PermissionError as error:
            fail(f'File (unknown): Can\'t read: {error}')
        except (FileNotFoundError, ValueError):
            continue

        # Ask the child to reload; it may have died meanwhile
        try:
            os.kill(pid, signal.SIGHUP)
        except OSError:
            pass


def apply_final():
    """Use final ruleset.

    Return 0 on success, non-zero on failure.
    """
    # Check config: "check" command only validates, never loads
    if CONFIG['command'] == 'check':
        if CONFIG['root_power']:  # "nft check" requires root
            ret = run_program_rc(CONFIG['_nft_bin'] + ['--check', '--file',
                                                       CONFIG['_next_file']])
        else:
            ret = 0
            warning('Not running as "root", skipping "nft check"')
        if ret:
            fail(f'Nftables failed to check ruleset, error code {ret}', False)
        else:
            verbose('check success', 0)
        return ret

    # Run pre_start / pre_stop hook
    run_program_rc(CONFIG.get(f'pre_{CONFIG["command"]}'))

    # Load "next"
    ret = run_program_rc(CONFIG['_nft_bin'] + ['--file', CONFIG['_next_file']],
                         print_output=False)

    # Check failure
    if ret:
        fail(f'Failed to load ruleset to nftables, error code {ret}', False)
        return 1

    # Success. Rename "next" to "good", signal dbus to reload and run hook
    if CONFIG['command'] == 'start':
        CONFIG['_good_file'].unlink(missing_ok=True)
        CONFIG['_next_file'].rename(CONFIG['_good_file'])
    signal_childs()
    run_program_rc(CONFIG.get(f'post_{CONFIG["command"]}'))
    verbose(f'{CONFIG["command"]} success', 0)
    return 0


def command_start():
    """Process "start" or "check" command.

    Return apply_final()'s result: 0 on success.
    """
    # Read full config
    config = minimal_config()
    zones = parse_config_zones(config)
    zonemap = parse_config_zonemap(config)
    iplist = parse_iplist_names(config)
    chain_rules = parse_config_special_chains(config)
    parse_config_groups(config, parse_config_targets(config))
    parse_config_hook(config)
    rules = parse_config_rules(config)  # Also verify for unknown sections
    insert_any_zones(zones, rules)
    verify_config(config, zones, rules)

    # Generate output. Order matters: the optimize_* passes rewrite OUT[]
    # in place and output_logrates() emits only the entries that
    # optimize_jumps() left over.
    output_header(chain_rules)
    output_rate_names(rules)
    output_zone_vmaps(zones, rules)
    output_zone2zone_rules(rules, zonemap)
    output_special_chains(chain_rules)
    optimize_jumps()
    optimize_final_rules()
    optimize_accepts()
    output_logrates()
    output_iplist_sets(iplist)
    output_named_counters(rules, chain_rules)
    output_helpers()
    output_rpfilter()
    output_footer()
    command_iplist_refresh(config, iplist)

    # Save known zones to file
    save_file(CONFIG['_zone_file'], zones.keys())

    # Save and apply generated ruleset
    save_final(CONFIG['_next_file'])
    return apply_final()


def command_stop():
    """Process "stop" command. This will remove all foomuuri rules."""
    config = minimal_config()  # Needed for pre_stop and post_stop hooks
    parse_config_hook(config)
    # Declare-then-delete works whether or not the table currently exists
    for line in ('table inet foomuuri', 'delete table inet foomuuri'):
        out(line)
    save_final(CONFIG['_next_file'])
    return apply_final()


def command_block():
    """Load "block all traffic" ruleset.

    Return nft's exit code.
    """
    minimal_config()  # Resolve nft_bin and share_dir paths
    # Pre-made "block everything" ruleset shipped in share_dir
    return run_program_rc(CONFIG['_nft_bin'] + [
        '--file', CONFIG['_share_dir'] / 'block.fw'])


def parse_active_interface_zone():
    """Parse current interface->zone mapping from active nft ruleset."""
    data = nft_json('list map inet foomuuri input_zones')
    if not data:
        return {}
    mapping = {}
    for item in data['nftables']:
        if 'map' not in item:
            continue
        for interface, rule in item['map']['elem']:
            if interface == 'lo':
                continue
            # input_zones elements jump to "zone-localhost" chains,
            # the zone is the part before "-"
            mapping[interface] = rule['jump']['target'].split('-')[0]
    return mapping


def command_status():
    """Print if Foomuuri is running, zone<->mapping, etc.

    Return 0; fail() if the foomuuri table is not loaded.
    """
    # Get minimal config and interface status
    config = minimal_config()
    zones = parse_config_zones(config)
    zone_interface = parse_active_interface_zone()

    # Running = the foomuuri table exists in the active nft ruleset
    ruleset = nft_json('list table inet foomuuri')
    if not ruleset:
        fail('Foomuuri is not running')
    print('Foomuuri is running')

    # D-Bus, Monitor: pid file must exist and signal 0 reach the process
    for child in ('dbus', 'monitor'):
        filename = CONFIG['_run_dir'] / f'foomuuri-{child}.pid'
        try:
            pid = int(filename.read_text(encoding='utf-8'))
            os.kill(pid, 0)
            print(f'Foomuuri-{child} is running, PID {pid}')
        except (FileNotFoundError, ValueError, PermissionError,
                ProcessLookupError):
            print(f'Foomuuri-{child} is not running')

    # Zones
    if not zone_interface:
        print()
        print('Warning: There are no interfaces assigned to any zones')

    print()
    print('zone {')
    for zone in zones:
        interfaces = [interface
                      for interface, int_zones in zone_interface.items()
                      if zone == int_zones]
        print(f'  {zone:15s} {" ".join(interfaces)}')
    print('}')
    return 0


class FoomuuriDbusException(dbus.DBusException):
    """Exception class for D-Bus interface.

    Raised back to D-Bus callers on invalid zone/interface requests.
    """

    _dbus_error_name = 'fi.foobar.Foomuuri1.exception'


class DbusCommon:
    """D-Bus server - Common Functions.

    Shared implementation for the Foomuuri and FirewallD emulation D-Bus
    services. Interface/zone changes are applied by generating nft
    add/delete element commands into OUT[] and feeding them to nft.
    """

    # Static zone list from config, set once via set_data()
    zones = None

    def set_data(self, zones):
        """Save config data: zone list is static."""
        self.zones = zones

    @staticmethod
    def clean_out():
        """Remove all entries from current OUT[] variable."""
        while OUT:
            del OUT[0]

    @staticmethod
    def apply_out():
        """Apply current OUT commands."""
        save_final(CONFIG['_dbus_file'])
        run_program_rc(CONFIG['_nft_bin'] + ['--file', CONFIG['_dbus_file']])

    @staticmethod
    def remove_interface(interface_zone, interface):
        """Remove interface from all zones.

        Return interface's previous zone, or '' if it had none.
        """
        # Get interface's current zone
        zone = interface_zone.get(interface)
        if not zone:
            return ''

        # Remove from input and output
        out(f'delete element inet foomuuri input_zones '
            f'{{ "{interface}" : jump {zone}-{CONFIG["localhost_zone"]} }}')
        out(f'delete element inet foomuuri output_zones '
            f'{{ "{interface}" : jump {CONFIG["localhost_zone"]}-{zone} }}')

        # Remove from forward, both directions for every known interface
        for other, otherzone in interface_zone.items():
            out(f'delete element inet foomuuri forward_zones '
                f'{{ "{other}" . "{interface}" : jump {otherzone}-{zone} }}')
            if other != interface:
                out(f'delete element inet foomuuri forward_zones '
                    f'{{ "{interface}" . "{other}" : '
                    f'jump {zone}-{otherzone} }}')
        return zone

    @staticmethod
    def add_interface(interface_zone, interface, zone):
        """Add interface to zone. It must be already removed from others."""
        # Add to input and output
        out(f'add element inet foomuuri input_zones '
            f'{{ "{interface}" : jump {zone}-{CONFIG["localhost_zone"]} }}')
        out(f'add element inet foomuuri output_zones '
            f'{{ "{interface}" : jump {CONFIG["localhost_zone"]}-{zone} }}')

        # Add to forward, both directions for every other interface
        for other, otherzone in interface_zone.items():
            if other != interface:
                out(f'add element inet foomuuri forward_zones '
                    f'{{ "{other}" . "{interface}" : '
                    f'jump {otherzone}-{zone} }}')
                out(f'add element inet foomuuri forward_zones '
                    f'{{ "{interface}" . "{other}" : '
                    f'jump {zone}-{otherzone} }}')
        # Interface can also forward to itself
        out(f'add element inet foomuuri forward_zones '
            f'{{ "{interface}" . "{interface}" : jump {zone}-{zone} }}')

    def change_interface_zone(self, interface, new_zone):
        """Change interface to new_zone, or delete if new_zone is empty.

        Return (old_zone, new_zone) tuple.
        Raise FoomuuriDbusException on unknown zone or "lo" changes.
        """
        interface, new_zone = str(interface), str(new_zone)
        if new_zone and new_zone not in self.zones:
            warning(f'Zone "{new_zone}" is unknown')
            raise FoomuuriDbusException(f'Zone "{new_zone}" is unknown')
        if interface == 'lo':
            # Interface "lo" must stay in "localhost" zone.
            # Other interfaces can be added to "localhost" only if
            # "localhost-localhost" section is defined.
            if new_zone != CONFIG['localhost_zone']:
                warning(f'Can\'t change interface "lo" to zone "{new_zone}"')
                raise FoomuuriDbusException(f'Can\'t change interface "lo" '
                                            f'to zone "{new_zone}"')
            return '', ''
        interface_zone = parse_active_interface_zone()
        self.clean_out()
        old_zone = self.remove_interface(interface_zone, interface)
        if new_zone:
            self.add_interface(interface_zone, interface, new_zone)
        self.apply_out()
        return old_zone, new_zone

    def parse_default_zone(self, interface, zone):
        """Return zone, or dbus_zone if empty."""
        interface, zone = str(interface), str(zone)
        if zone:
            return zone

        # Fallback to zones section, or to foomuuri.dbus_zone
        for key, value in self.zones.items():
            if interface in value['interface']:
                return key
        return CONFIG['dbus_zone']

    def method_get_zones(self):
        """Get list of available zones.

        "localhost" can't have any interfaces so don't include it.
        """
        return [name for name in self.zones
                if name != CONFIG['localhost_zone']]

    def method_remove_interface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty.

        This is currently always handled as "from all".
        """
        verbose(f'Interface "{interface}" remove from zone "{zone}"', 0)
        return self.change_interface_zone(interface, '')[0]

    def method_add_interface(self, zone, interface):
        """Add interface to zone.

        There can be only one zone per interface so it will be removed
        from previous zone if needed.
        """
        zone = self.parse_default_zone(interface, zone)
        verbose(f'Interface "{interface}" add to zone "{zone}"', 0)
        return self.change_interface_zone(interface, zone)[1]

    def method_change_zone_of_interface(self, zone, interface):
        """Change interface to zone."""
        zone = self.parse_default_zone(interface, zone)
        verbose(f'Interface "{interface}" change to zone "{zone}"', 0)
        return self.change_interface_zone(interface, zone)[0]


class DbusFoomuuri(dbus.service.Object, DbusCommon):
    """D-Bus server for Foomuuri.

    Thin wrappers which expose the DbusCommon method_* implementations
    on the fi.foobar.Foomuuri1.zone interface.
    """

    # pylint: disable=invalid-name  # dbus method names

    # Interface name shared by all exported methods below
    _IFACE = 'fi.foobar.Foomuuri1.zone'

    @dbus.service.method(_IFACE, in_signature='', out_signature='as')
    def getZones(self):
        """Get list of available zones."""
        return self.method_get_zones()

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def removeInterface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty.

        Return: previous zone
        """
        return self.method_remove_interface(zone, interface)

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def addInterface(self, zone, interface):
        """Add interface to zone.

        Return: new zone
        """
        return self.method_add_interface(zone, interface)

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def changeZoneOfInterface(self, zone, interface):
        """Change interface to zone.

        Return: previous zone
        """
        return self.method_change_zone_of_interface(zone, interface)


class DbusFirewallD(dbus.service.Object, DbusCommon):
    """D-Bus server for FirewallD emulation.

    Same wrappers as DbusFoomuuri, exported on the FirewallD interface
    name so firewalld clients can manage zones.
    """

    # pylint: disable=invalid-name  # dbus method names

    # Interface name shared by all exported methods below
    _IFACE = 'org.fedoraproject.FirewallD1.zone'

    @dbus.service.method(_IFACE, in_signature='', out_signature='as')
    def getZones(self):
        """Get list of available zones."""
        return self.method_get_zones()

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def removeInterface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty.

        Return: previous zone
        """
        return self.method_remove_interface(zone, interface)

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def addInterface(self, zone, interface):
        """Add interface to zone.

        Return: new zone
        """
        return self.method_add_interface(zone, interface)

    @dbus.service.method(_IFACE, in_signature='ss', out_signature='s')
    def changeZoneOfInterface(self, zone, interface):
        """Change interface to zone.

        Return: previous zone
        """
        return self.method_change_zone_of_interface(zone, interface)


def command_dbus():
    """Start D-Bus daemon.

    Runs until SIGINT stops it. SIGHUP only quits the GLib mainloop so
    the outer loop re-reads the config and re-registers the D-Bus
    objects.
    """
    CONFIG['keep_going'] = True
    while CONFIG['keep_going']:
        # Read minimal config
        config = minimal_config()
        zones = parse_config_zones(config)
        daemonize()

        # Initialize D-Bus
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        bus = dbus.SystemBus()

        # Foomuuri D-Bus calls
        try:
            foomuuri_name = dbus.service.BusName('fi.foobar.Foomuuri1', bus)
            foomuuri_name.get_name()  # Dummy call to get rid of pylint
        except dbus.exceptions.DBusException:
            fail('Can\'t bind to system D-Bus: fi.foobar.Foomuuri1')
        foomuuri_object = DbusFoomuuri(bus, '/fi/foobar/Foomuuri1')
        foomuuri_object.set_data(zones)

        # FirewallD emulation calls, if enabled in foomuuri{} config
        firewalld_object = None
        if CONFIG['dbus_firewalld'] == 'yes':
            try:
                firewalld_name = dbus.service.BusName(
                    'org.fedoraproject.FirewallD1', bus)
                firewalld_name.get_name()  # Dummy call to get rid of pylint
            except dbus.exceptions.DBusException:
                # Bug fix: message used to say "org.federaproject", now
                # matches the actual bus name requested above
                fail('Can\'t bind to system D-Bus: '
                     'org.fedoraproject.FirewallD1')
            firewalld_object = DbusFirewallD(bus,
                                             '/org/fedoraproject/FirewallD1')
            firewalld_object.set_data(zones)

        # Define reload/stop signal handler
        mainloop = GLib.MainLoop()

        def signal_handler(sig, _dummy_frame):
            # SIGINT = stop for good, SIGHUP = quit mainloop to reload
            if sig == signal.SIGINT:
                CONFIG['keep_going'] = False
                if HAVE_NOTIFY:
                    notify('STOPPING=1')
            elif HAVE_NOTIFY:
                notify('RELOADING=1')
            mainloop.quit()

        signal.signal(signal.SIGHUP, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        save_file(CONFIG['_run_dir'] / 'foomuuri-dbus.pid', [str(os.getpid())])

        # Start processing messages
        verbose('D-Bus handler ready', 0)
        if HAVE_NOTIFY:
            notify('READY=1')
        mainloop.run()

        # Reload signal received. Disconnect from D-Bus.
        foomuuri_object.remove_from_connection()
        del foomuuri_object
        del foomuuri_name
        if firewalld_object:
            firewalld_object.remove_from_connection()
            del firewalld_object
            del firewalld_name
        del bus


def command_set():
    """Modify runtime config by calling D-Bus methods."""
    args = sys.argv
    if len(args) != 6 or args[2] != 'interface' or args[4] != 'zone':
        command_help()
    interface, zone = args[3], args[5]
    try:
        bus = dbus.SystemBus()
        obj = bus.get_object('fi.foobar.Foomuuri1', '/fi/foobar/Foomuuri1')
        if zone in ('', '-'):
            # Empty or "-" zone means remove from all zones
            obj.removeInterface('', interface)
        else:
            obj.changeZoneOfInterface(zone, interface)
    except dbus.exceptions.DBusException as error:
        fail(error)


def remove_filters(text):
    """Remove tailing flags and filters from hostname or URL."""
    # Flags/filters follow the first "|", e.g. "example.com|missing-ok"
    return text.partition('|')[0]


def resolve_one_hostname(hostname):
    """Resolve hostname and return its IP addresses as a set."""
    try:
        infos = socket.getaddrinfo(remove_filters(hostname), None)
    except socket.gaierror:
        # Lookup failure: caller treats empty set as "no addresses"
        return set()
    return {info[4][0] for info in infos if info[4] and info[4][0]}


def resolve_all_hostnames(resolve, cache):
    """Resolve hostnames to IP addresses and save them to cache.

    resolve: {hostname: (timeout_seconds, refresh_seconds)}
    cache:   {hostname: {'ip': {address: expire}, 'refresh': datetime}}

    Hostnames with a fresh enough cached value are skipped unless a
    forced run was requested (CONFIG['force'] >= 0).
    """
    # pylint: disable=too-many-locals

    # Collect list of hostnames to resolve
    now = datetime.datetime.now(datetime.timezone.utc)
    todo = set()
    for hostname, (timeout, refresh) in resolve.items():
        cachevalue = cache.get(hostname, {}).get('ip', {})
        cacherefresh = cache.get(hostname, {}).get('refresh')
        if cachevalue and cacherefresh:
            host_timeout = cacherefresh + datetime.timedelta(seconds=timeout)
            next_refresh = cacherefresh + datetime.timedelta(seconds=refresh)
            if now >= host_timeout:
                # Cached addresses are too old: drop them and re-resolve
                cache[hostname]['ip'] = {}
                cache[hostname]['dirty'] = True
            elif now < next_refresh and CONFIG['force'] < 0:
                # Still fresh, keep cached addresses as-is
                verbose(f'Using cached value for '
                        f'"{remove_filters(hostname)}"')
                continue
        todo.add(hostname)
    if not todo:
        return

    # Resolve them, one thread per hostname
    verbose(f'DNS lookup for: {", ".join(todo)}')
    with concurrent.futures.ThreadPoolExecutor() as executor:
        jobs = {executor.submit(resolve_one_hostname, hostname): hostname
                for hostname in todo}
        for future in concurrent.futures.as_completed(jobs):
            hostname = jobs[future]
            addrlist = future.result()
            if addrlist:
                # Append/update addresses, don't replace whole list.
                # Some cloud services will change server IP a lot, and some
                # will return only partial list of addresses. By appending
                # old addresses will still work until lookup timeout.
                iplist_cache_init(cache, hostname)
                cache[hostname]['dirty'] = True
                cache[hostname]['refresh'] = now
                expire = now + datetime.timedelta(seconds=resolve[hostname][0])
                for addr in addrlist:
                    cache[hostname]['ip'][addr] = expire
                verbose(f'Hostname "{remove_filters(hostname)}" resolved '
                        f'to: {", ".join(addrlist)}')
            elif '|missing-ok' not in hostname:
                warning(f'No IP address found for hostname '
                        f'"{remove_filters(hostname)}" in iplist')


def active_sets():
    """Return set names in currently active firewall."""
    data = nft_json('list sets table inet foomuuri')
    if not data:
        return set()
    # Strip the "_4"/"_6" suffix and add "@" prefix to match config names
    return {f'@{item["set"]["name"][:-2]}'
            for item in data['nftables'] if 'set' in item}


def get_url(url):
    """Download URL and return it as text, or None if failure.

    Tries up to three times with a 10 second pause between attempts.
    A failure is warned about unless the URL has the "|missing-ok" flag.
    """
    if HAVE_REQUESTS:
        for retry in range(3):
            if retry:
                time.sleep(10)  # Pause before second and third attempt
            try:
                response = requests.get(remove_filters(url), timeout=50)
            except OSError as error:
                # requests exceptions derive from OSError (IOError)
                err = f'error: {error}'
            else:
                if response.status_code == 200:
                    return response.text
                err = f'status code: {response.status_code}'
    else:
        err = 'No python-requests installed'
    # err always holds the description of the last failure here
    if '|missing-ok' not in url:
        warning(f'Can\'t download iplist URL "{remove_filters(url)}": {err}')
    return None


def get_file(wildcard):
    """Read all files and return them as single text.

    The last path component of wildcard may contain glob patterns.
    Returns None if no file matched (warned unless the "|missing-ok"
    flag is present) or if a matched file could not be read.
    """
    wildpath = pathlib.Path(remove_filters(wildcard))
    filenames = sorted(wildpath.parent.glob(wildpath.name))
    if not filenames:
        if '|missing-ok' not in wildcard:
            warning(f'Can\'t read iplist file "{remove_filters(wildcard)}": '
                    f'No such file')
        return None
    text = ''
    for filename in filenames:
        try:
            content = filename.read_text(encoding='utf-8')
        except PermissionError as error:
            # Bug fix: report the file that actually failed instead of
            # the literal placeholder "(unknown)"
            warning(f'Can\'t read iplist file "{filename}": {error}')
            return None
        text = text + '\n' + content
    return text


def parse_hour_min(timespec, fallback):
    """Parse 4h3m to seconds.

    This is dummy parser without any good error checking.
    """
    if not timespec:
        return fallback
    timespec = timespec.replace(' ', '')

    # Split off each unit in d/h/m/s order; leftover text means error
    parts = {}
    for unit in 'dhms':
        parts[unit] = 0
        if unit in timespec:
            parts[unit], timespec = timespec.split(unit, 1)
    if timespec:
        return fallback
    try:
        return (int(parts['d']) * 86400 +
                int(parts['h']) * 3600 +
                int(parts['m']) * 60 +
                int(parts['s']))
    except ValueError:
        return fallback


def iterate_set_elements(data):
    """Iterate elements from "nft --json list set" output.

    Yields (address, expire_seconds) tuples. Plain elements without an
    explicit "expires" value default to 10 days.
    """
    for entry in (data or {}).get('nftables', []):
        for item in entry.get('set', {}).get('elem', []):
            addr = item
            expire = 864000  # 10 days
            if isinstance(item, dict) and 'elem' in item:
                addr = item['elem']['val']
                expire = item['elem'].get('expires', 864000)
            if isinstance(addr, str):
                yield addr, expire
            elif 'range' in addr:
                first, last = addr['range']
                yield f'{first}-{last}', expire
            else:
                prefix = addr['prefix']
                yield f'{prefix["addr"]}/{prefix["len"]}', expire


def command_iplist_list():
    """List iplist entries."""
    config = minimal_config()
    known = parse_iplist_names(config)
    for setname in CONFIG['parameters'][1:] or sorted(known):
        if setname[:1] == '@':
            setname = setname[1:]
        # Explicit "_4"/"_6" suffix lists one family, plain name both
        if setname.endswith(('_4', '_6')):
            data4 = nft_json(f'list set inet foomuuri {setname}')
            data6 = {}
        else:
            data4 = nft_json(f'list set inet foomuuri {setname}_4')
            data6 = nft_json(f'list set inet foomuuri {setname}_6')
        elems = (sorted(addr for addr, _expire in iterate_set_elements(data4))
                 + sorted(addr for addr, _expire
                          in iterate_set_elements(data6)))
        if not elems:
            print(f'@{setname}')
        for elem in elems:
            print(f'@{setname:20s}  {elem}')
    return 0


def command_iplist_add():
    """Add entries to iplist.

    Command line: foomuuri iplist add <setname> <address|timeout>...
    A non-address argument is parsed as a timeout (e.g. "5m") and
    changes the expire time used for the addresses that follow it.
    """
    config = minimal_config()
    iplist, timeouts = iplist_parse_config(config)
    cache = read_iplist_cache(iplist)
    setname = CONFIG['parameters'][1]
    if not setname.startswith('@'):
        setname = f'@{setname}'
    if setname not in iplist:
        fail(f'Unknown iplist name: {setname}')
    # Default expire is the longer of the set's url/dns timeouts
    timeout = max(timeouts[setname]['url_timeout'],
                  timeouts[setname]['dns_timeout'])
    now = datetime.datetime.now(datetime.timezone.utc)
    expire = now + datetime.timedelta(seconds=timeout)

    # Add entries to cache and apply it
    iplist_cache_init(cache, f'manual.{setname}')
    for address in CONFIG['parameters'][2:]:
        ipv = is_ip_address(address)
        if ipv:
            cache[f'manual.{setname}']['ip'][address] = expire
            cache[f'manual.{setname}']['dirty'] = True
        else:
            # Not an address: parse as timeout for following addresses
            timeout = parse_hour_min(address, 0)
            if not timeout:
                fail(f'Invalid IP address {address}')
            expire = now + datetime.timedelta(seconds=timeout)
            if timeout > 630720000:  # More than 20 years is "forever"
                expire = datetime.datetime(year=2100, month=1, day=1,
                                           tzinfo=datetime.timezone.utc)

    return iplist_output_values(iplist, cache, active_sets(), {setname})


def command_iplist_del():
    """Delete entries from iplist."""
    config = minimal_config()
    iplist, _dummy_timeouts = iplist_parse_config(config)
    cache = read_iplist_cache(iplist)
    setname = CONFIG['parameters'][1]
    if not setname.startswith('@'):
        setname = f'@{setname}'
    if setname not in iplist:
        fail(f'Unknown iplist name: {setname}')

    # Remove the addresses from the manual entry and mark it for update
    manual = f'manual.{setname}'
    iplist_cache_init(cache, manual)
    for address in CONFIG['parameters'][2:]:
        cache[manual]['ip'].pop(address, None)
        cache[manual]['dirty'] = True

    return iplist_output_values(iplist, cache, active_sets(), {setname})


def command_iplist_flush():
    """Delete all entries from iplist."""
    config = minimal_config()
    iplist, _dummy_timeouts = iplist_parse_config(config)
    cache = read_iplist_cache(iplist)
    update_only = set()

    for setname in CONFIG['parameters'][1:]:
        if not setname.startswith('@'):
            setname = f'@{setname}'
        if setname not in iplist:
            fail(f'Unknown iplist name: {setname}')
        update_only.add(setname)

        # Empty the manually added addresses and mark the set for update
        manual = f'manual.{setname}'
        iplist_cache_init(cache, manual)
        cache[manual]['ip'] = {}
        cache[manual]['dirty'] = True

    return iplist_output_values(iplist, cache, active_sets(), update_only)


def lxml_parser(url, parser, path, text):
    """Parse html/xml text with lxml, return xpath matches joined."""
    # pylint: disable=c-extension-no-member
    try:
        nodes = lxml.etree.fromstring(text, parser).xpath(path)
    except lxml.etree.LxmlError as error:
        warning(f'Failed to parse iplist xml/html "{url}": {error}')
        return None
    return '\n'.join(nodes)


def iplist_url_filters(url, text):
    """Apply filters to downloaded iplist content.

    Filters follow the URL after "|", e.g. "url|json:.addresses[]".
    Unknown filter names are silently ignored.
    """
    # pylint: disable=c-extension-no-member
    for spec in url.split('|')[1:]:
        if not text:
            break
        name, sep, arg = spec.partition(':')
        if not sep:
            continue  # Flag (e.g. "missing-ok"), not a filter
        if name == 'shell':
            text = run_program_pipe([arg], text)
        elif name == 'json':
            text = run_program_pipe(['jq', '--raw-output', arg], text)
        elif name == 'html' and HAVE_LXML:
            text = lxml_parser(url, lxml.etree.HTMLParser(), arg, text)
        elif name == 'xml' and HAVE_LXML:
            text = lxml_parser(url, lxml.etree.XMLParser(), arg, text)
    return text


def iplist_cleanup_file(url, text):
    """Remove comments and empty lines from downloaded iplist file."""
    # Apply filters
    text = iplist_url_filters(url, text)
    if not text:
        return []

    # Collect IP addresses; "#" and ";" start a comment
    values = []
    for line in text.splitlines():
        line = line.split('#', 1)[0]
        line = line.split(';', 1)[0]
        for value in line.split():
            if is_ip_address(value):
                values.append(value)
            else:
                warning(f'Unknown entry in iplist "{url}": {value}')
    return values


def get_url_or_file(url, timeout, refresh, cache):
    """Get URL or file content with cache check.

    Updates cache[url] in place. Does nothing if the cached value is
    still fresh and no forced run was requested (CONFIG['force'] < 0).
    """
    now = datetime.datetime.now(datetime.timezone.utc)
    cachevalue = cache.get(url, {}).get('ip', {})
    cacherefresh = cache.get(url, {}).get('refresh')
    if cachevalue and cacherefresh:
        url_timeout = cacherefresh + datetime.timedelta(seconds=timeout)
        next_refresh = cacherefresh + datetime.timedelta(seconds=refresh)
        if now >= url_timeout:
            # Cached content too old: drop it and re-download below
            cache[url]['ip'] = {}
            cache[url]['dirty'] = True
        elif now < next_refresh and CONFIG['force'] < 0:
            verbose(f'Using cached value for "{url}"')
            return

    # "https:"/"http:" is downloaded, anything else is a local file
    if url.startswith(('https:', 'http:')):
        value = get_url(url)
    else:
        value = get_file(url)

    if value is not None:
        # Replace list, don't append. These lists are always complete
        # address lists.
        expire = now + datetime.timedelta(seconds=timeout)
        cache[url] = {
            'ip': {addr: expire for addr in iplist_cleanup_file(url, value)},
            'dirty': True,
            'refresh': datetime.datetime.now(datetime.timezone.utc),
        }
        verbose(f'Iplist content for "{url}" refreshed, '
                f'{len(cache[url]["ip"])} entries')


def iplist_parse_timeout(fileline, timeouts, setting, extra):
    """Parse key=value from setting and set timeouts[key] = value."""
    # Parse setting to key=value
    if extra:  # Backward compabililty  (v0.27)
        key, value = setting, ''.join(extra)
    else:
        key, value = setting.split('=', 1)
    if not value:
        fail(f'{fileline}No value for "{key}"')

    # Specific keys update a single value; the generic "timeout" and
    # "refresh" keys update both the dns and url variants at once
    if key in ('dns_timeout', 'dns_refresh', 'url_timeout', 'url_refresh'):
        timeouts[key] = parse_hour_min(value, timeouts[key])
    elif key == 'timeout':
        timeouts['dns_timeout'] = timeouts['url_timeout'] = parse_hour_min(
            value, timeouts['dns_timeout'])
    else:  # refresh
        timeouts['dns_refresh'] = timeouts['url_refresh'] = parse_hour_min(
            value, timeouts['dns_refresh'])


def read_old_iplist_format(cache):
    """Read old iplist.fw (v0.27) files and convert them to new cache format.

    Example entries:

    # add element inet foomuuri fi_6 { 2a13:aec0::/32 timeout 10d }
    add element inet foomuuri foo_4 { 10.0.9.98 timeout 24h }
    add element inet foomuuri dynamic_4 { 10.0.0.4 timeout
      798169s } # 2024-12-11T17:16:28
    """
    for file_key in ('_resolve_file', '_iplist_file', '_iplist_manual_file'):
        try:
            manual = CONFIG[file_key].read_text('utf-8')
        except OSError:
            continue  # Silently ignore all errors

        for line in manual.splitlines():
            if line.startswith('# '):
                # Strip comment prefix, commented entries are parsed too
                line = line[2:]
            items = line.split()
            # 10 tokens = plain entry, 12 = entry with trailing "# stamp"
            if (
                    not line.startswith('add element inet') or
                    len(items) not in (10, 12) or
                    not is_ip_address(items[6])
            ):
                continue
            # items[4] is the set name with _4/_6 suffix, items[6] the address
            setname = f'manual.@{items[4][:-2]}'

            if len(items) == 12:
                timeout = items[11]  # iplist_manual_file has expire stamp
            else:
                # Convert relative timeout (items[8]) to absolute timestamp
                timeout = (datetime.datetime.now(datetime.timezone.utc) +
                           datetime.timedelta(seconds=parse_hour_min(
                               items[8], 0))).strftime('%Y-%m-%dT%H:%M:%S')
            iplist_cache_init(cache, setname)
            cache[setname]['ip'][items[6]] = timeout


def read_iplist_cache(iplist):
    """Read current iplist values from cache.

    Drops sets that are no longer in config, expired addresses and
    empty sets. Timestamp strings are converted to datetime objects.
    """
    cache = {}
    try:
        cache = json.loads(CONFIG['_iplist_cache_file'].read_text('utf-8'))
    except json.decoder.JSONDecodeError as error:
        warning(f'File {CONFIG["_iplist_cache_file"]}: Broken JSON '
                f'format: {error}')
    except PermissionError as error:
        fail(f'File {CONFIG["_iplist_cache_file"]}: Can\'t read: {error}')
    except FileNotFoundError:
        # No cache file yet, try to convert pre-v0.28 state files
        read_old_iplist_format(cache)

    now = datetime.datetime.now(datetime.timezone.utc)
    for key in list(cache):
        # key[7:] strips the "manual." prefix (7 characters)
        if (
                (key.startswith('@') and key not in iplist) or
                (key.startswith('manual.@') and key[7:] not in iplist) or
                'ip' not in cache[key]
        ):
            # Iplist in cache, not in config
            del cache[key]
            verbose(f'Deleting iplist "{key}" from cache, not in config')
            continue

        # Check top level refresh. Missing or invalid value is ok.
        try:
            refresh = datetime.datetime.strptime(
                cache[key]['refresh'], '%Y-%m-%dT%H:%M:%S')
            refresh = refresh.replace(tzinfo=datetime.timezone.utc)
        except (ValueError, KeyError, TypeError):
            refresh = now
        cache[key]['refresh'] = refresh

        # Check IP address timeouts
        for value in list(cache[key]['ip']):
            try:
                timeout = datetime.datetime.strptime(
                    cache[key]['ip'][value], '%Y-%m-%dT%H:%M:%S')
                timeout = timeout.replace(tzinfo=datetime.timezone.utc)
            except (ValueError, KeyError):
                warning(f'Invalid iplist "{key}" entry "{value}" timestamp '
                        f'in cache')
                timeout = now  # Gets deleted
            cache[key]['ip'][value] = timeout
            if now >= timeout:
                verbose(f'Deleting expired iplist "{key}" entry "{value}"')
                del cache[key]['ip'][value]

        # Don't keep empty sets
        if not cache[key]['ip']:
            del cache[key]
    return cache


def write_iplist_cache(cache):
    """Write iplist cache to disk for next run.

    Converts datetime values back to strings. Set-level entries (keys
    starting with "@" or "manual.@") drop their refresh stamp, URL and
    hostname entries keep it.
    """
    stamp = '%Y-%m-%dT%H:%M:%S'
    for key, entry in cache.items():
        entry.pop('dirty', None)
        if key.startswith(('@', 'manual.@')):
            del entry['refresh']
        else:
            entry['refresh'] = entry['refresh'].strftime(stamp)
        for address in entry['ip']:
            entry['ip'][address] = entry['ip'][address].strftime(stamp)

    save_file(CONFIG['_iplist_cache_file'], cache)


def iplist_parse_config(config):
    """Parse resolve{} and iplist{} sections from config.

    Returns (iplist, timeouts): iplist maps "@name" to its list of
    values, timeouts maps "@name" to its timeout/refresh settings.
    """
    def_times = {
        'dns_timeout': 86400,   # 24h
        'dns_refresh': 900,     # 15m
        # Bug fix: was 886400, but 10 days is 864000 seconds
        'url_timeout': 864000,  # 10d
        'url_refresh': 86400,   # 24h
        'timeout': 0,           # Backward compability keyword (v0.27)
        'refresh': 0,           # Backward compability keyword
    }
    iplist = {}
    timeouts = {}
    for fileline, line in config.pop('resolve', []) + config.pop('iplist', []):
        name = line[0]

        # Parse global timeout/refresh value
        if (
                (
                    '=' in name and
                    name.split('=')[0] in def_times and
                    len(line) == 1
                ) or (  # Backward compabililty  (v0.27)
                    name in ('timeout', 'refresh') and
                    len(line) > 1
                )
        ):
            iplist_parse_timeout(fileline, def_times, name, line[1:])
            continue

        # Name must be set name, check it
        if not name.startswith('@') or name == '@':
            fail(f'{fileline}Invalid iplist name: {" ".join(line)}')
        check_name(name[1:], fileline, '@')

        # Parse line items to value[]
        if len(line) == 1 or line[1:] == ['-']:  # Empty set
            value = []
        elif line[1] == '+':  # Append to previous value
            value = iplist.get(name, []) + line[2:]
        else:  # Overwrite previous value
            if name in iplist:
                verbose(f'{fileline}Overwriting iplist "{name}" with value '
                        f'"{" ".join(line[1:])}"')
            value = line[1:]

        # Parse pre-entry timeouts
        this_value = []
        this_timeouts = timeouts.get(name, def_times).copy()
        for item in value:
            if '=' in item and item.split('=')[0] in def_times:
                iplist_parse_timeout(fileline, this_timeouts, item, None)
            else:
                this_value.append(item)

        iplist[name] = this_value
        timeouts[name] = this_timeouts
    if CONFIG['verbose'] >= 2:
        for name, values in iplist.items():
            verbose(f'{name:9s}  {" ".join(values)}')
    return iplist, timeouts


def iplist_collect_resolve_list(iplist, timeouts):
    """Collect URLs and hostnames to be resolved."""
    if CONFIG['command'] != 'iplist':
        # Don't resolve anything if called as "foomuuri start".
        # Use cached entries only.
        return {}, {}

    def merge(target, value, timeout, refresh):
        # Keep the shortest timeout/refresh when several sets share a value
        if value in target:
            target[value] = (min(target[value][0], timeout),
                             min(target[value][1], refresh))
        else:
            target[value] = (timeout, refresh)

    urls = {}
    hostnames = {}
    for name, values in iplist.items():
        times = timeouts[name]
        for value in values:
            if value.startswith(('https://', 'http://', '.', '/')):
                merge(urls, value,
                      times['url_timeout'], times['url_refresh'])
            elif not is_ip_address(value):
                merge(hostnames, value,
                      times['dns_timeout'], times['dns_refresh'])
    if CONFIG['verbose'] >= 2:
        verbose(f'URLs:      {urls}')
        verbose(f'Hostnames: {hostnames}')
    return urls, hostnames


def iplist_cache_init(cache, setname):
    """Add empty setname to cache, keep an existing entry untouched."""
    if setname not in cache:
        cache[setname] = {
            'ip': {},
            'refresh': datetime.datetime.now(datetime.timezone.utc),
        }


def iplist_cache_copy_ip(cache, source, destination):
    """Copy IP addresses from cache[source] to cache[destination]."""
    src = cache.get(source)
    if src is None:
        return
    if src.get('dirty'):
        cache[destination]['dirty'] = True
    dst_ip = cache[destination]['ip']
    for address, timeout in src['ip'].items():
        # Choose maximum timeout if both dynamic (resolved from hostname)
        # and manually added address exists.
        dst_ip[address] = max(dst_ip.get(address, timeout), timeout)


def iplist_output_add_element(cache, setname, now):
    """Output nft commands to update iplist values to sets.

    Auto-merge in nftables set requires that adds are done set by set, not
    mixing foo_4 and foo_6 adds.
    """
    # Collect addresses to IPv4 and IPv6 sets; is_ip_address() result
    # (4 or 6) is used as the dict key
    updates = {
        4: set(),
        6: set(),
    }
    for address in cache[setname]['ip']:
        updates[is_ip_address(address)].add(address)

    # Output add to sets; flush first so removed entries disappear
    out('')
    for ipv in (4, 6):
        out(f'flush set inet foomuuri {setname[1:]}_{ipv}')
        if not updates[ipv]:
            continue

        out(f'add element inet foomuuri {setname[1:]}_{ipv} {{')
        for address in sorted(updates[ipv]):
            timeout = cache[setname]['ip'][address]
            # Never emit a zero or negative timeout
            seconds = max(1, int((timeout - now).total_seconds()))
            if seconds > 630720000:  # 20 years, add without timeout
                out(f'{address},')
            else:
                out(f'{address} timeout {seconds}s,')
        out('}')


def iplist_output_values(iplist, cache, currently_active, update_only):
    """Output and apply nft commands to add iplist entries.

    currently_active: set of set names in the running firewall
    update_only:      limit updates to these set names; falsy = all

    Returns nft return code OR'ed with 2 if a non-empty set resolved
    to no addresses.
    """
    forever = datetime.datetime(year=2100, month=1, day=1,
                                tzinfo=datetime.timezone.utc)
    now = datetime.datetime.now(datetime.timezone.utc)
    error = 0
    for setname in sorted(iplist):
        setvalues = iplist[setname]

        # Collect new values to cache[@setname]
        cache[setname] = {
            'ip': {},
            'refresh': now,
        }
        for value in setvalues:
            # Static IP address doesn't need refresh, so timeout is forever
            if is_ip_address(value):
                cache[setname]['ip'][value] = forever
                cache[setname]['dirty'] = True
                continue

            # Add IPs from URL/hostname
            iplist_cache_copy_ip(cache, value, setname)

        # Add manually added IPs
        iplist_cache_copy_ip(cache, f'manual.{setname}', setname)

        # Check that this set should be updated
        if update_only and setname not in update_only:
            continue  # Filtered away by command line parameter
        if setname not in currently_active:
            warning(f'Iplist "{setname}" does not exist in currently active '
                    f'firewall')
            continue
        if not cache[setname].get('dirty') and CONFIG['force'] < 0:
            verbose(f'Iplist "{setname}" is clean, skipping update')
            continue

        # Print error if set had values, but result is empty
        if (
                setvalues and
                not cache[setname]['ip'] and
                CONFIG['command'] == 'iplist'
        ):
            fail(f'Iplist "{setname}" is empty', False)
            error = 2
        else:
            verbose(f'Updating iplist "{setname}", '
                    f'{len(cache[setname]["ip"])} entries')

        # Add elements to sets
        iplist_output_add_element(cache, setname, now)

    return iplist_apply_values(cache) | error


def iplist_apply_values(cache):
    """Save iplist changes to disk and use nft to apply it."""
    # Only the "iplist" command applies changes, and only if something
    # was actually generated into the output buffer
    if CONFIG['command'] != 'iplist' or not OUT:
        return 0

    write_iplist_cache(cache)
    save_final(CONFIG['_iplist_apply_file'])
    command = CONFIG['_nft_bin'] + ['--file', CONFIG['_iplist_apply_file']]
    ret = run_program_rc(command)
    if ret:
        fail(f'Failed to update iplists, nftables error code {ret}', False)
    return ret


def command_iplist_refresh(config=None, currently_active=None):
    """Refresh iplist{} entries.

    config and currently_active may be supplied by "foomuuri start";
    otherwise they are read from disk and from the running nft ruleset.
    """
    # Parse minimal config unless caller already provided one
    config = config or minimal_config()
    iplist, timeouts = iplist_parse_config(config)
    urls, hostnames = iplist_collect_resolve_list(iplist, timeouts)

    # Fetch URLs and files into cache
    cache = read_iplist_cache(iplist)
    for url, (timeout, refresh) in urls.items():
        get_url_or_file(url, timeout, refresh, cache)

    # Resolve hostnames to cache
    resolve_all_hostnames(hostnames, cache)

    # Ask nft for active set names unless "foomuuri start" supplied them
    if currently_active is None:
        currently_active = active_sets()

        # Special case for testing (non-root) or faked nft_bin: pretend
        # every configured set is active
        if not currently_active and (
                CONFIG['nft_bin'] == 'true' or
                not CONFIG['root_power']
        ):
            currently_active = set(iplist)

    # "foomuuri iplist refresh setname" limits the update to named sets,
    # default is to update all of them
    extra_params = CONFIG['parameters'][1:]
    update_only = None
    if extra_params:
        update_only = [name if name.startswith('@') else f'@{name}'
                       for name in extra_params]

    # Apply updates to ruleset
    return iplist_output_values(iplist, cache, currently_active, update_only)


def command_iplist():
    """Parse "foomuuri iplist" subcommand."""
    params = CONFIG['parameters']
    # (keyword, minimum parameter count, handler)
    dispatch = (
        ('list', 1, command_iplist_list),
        ('add', 3, command_iplist_add),
        ('del', 3, command_iplist_del),
        ('flush', 2, command_iplist_flush),
        ('refresh', 1, command_iplist_refresh),
    )
    for keyword, min_params, handler in dispatch:
        if len(params) >= min_params and params[0] == keyword:
            return handler()
    return command_help()


def list_named_counter(command):
    """Parse and print named counter values.

    Returns 1 if nft gave no output, 0 otherwise.
    """
    data = nft_json(command)
    if not data:
        return 1

    # Print every toplevel entry that is a named counter
    for entry in data.get('nftables', []):
        counter = entry.get('counter') or {}
        name = counter.get('name')
        if name:
            packets = counter.get('packets', 0)
            octets = counter.get('bytes', 0)
            print(f'{name:21s}  {packets} packets, {octets} bytes')
    return 0


def command_list():
    """List currently active ruleset or other things.

    Dispatches on the first positional parameter:
    - no parameters: print the whole nftables ruleset
    - "macro": print macro definitions, optionally filtered by name/value
    - "counter": print named counters, optionally filtered by name
    - anything else: treat every parameter as a "szone-dzone" pair and
      print its IPv4 and IPv6 chains

    Returns 0 on success, or an accumulated nft error code.
    """
    # List all
    if not CONFIG['parameters']:
        return nft_command('list ruleset')

    # Parse list of possible zone-zone pairs
    config = minimal_config()
    zones = parse_config_zones(config)
    zonepairs = [f'{szone}-{dzone}' for szone in zones for dzone in zones]

    # Parse parameters
    list_type = CONFIG['parameters'][0]
    list_params = CONFIG['parameters'][1:]
    ret = 0

    if list_type == 'macro':
        # List macros. config{} doesn't have macro{} anymore so config
        # must be re-read.
        macros = parse_config_macros(read_config())
        print('macro {')
        for macro in sorted(macros):
            # Show macro if no filter is given, or a filter matches its
            # name or any of its values. Internal "_template_*" macros
            # are always hidden.
            if not macro.startswith('_template_') and (
                    not list_params or
                    macro in list_params or
                    any(item in list_params for item in macros[macro])
            ):
                print(f'  {macro:15s} {" ".join(macros[macro])}')
        print('}')
        return 0

    if list_type == 'counter':
        # List named counters
        if not list_params:
            ret |= list_named_counter('list counters table inet foomuuri')
        else:
            for counter in list_params:
                ret |= list_named_counter(f'list counter inet foomuuri '
                                          f'{counter}')
        return ret

    # List zone-zone rules; note that all parameters (including the first)
    # are zone pairs here
    for zone in CONFIG['parameters']:
        if zone not in zonepairs:
            fail(f'Unknown zone-zone: {zone}')
        for ipv in (4, 6):
            ret += nft_command(f'list chain inet foomuuri {zone}_{ipv}')
    return ret


def command_reload():
    """Run start and refresh iplist."""
    # Forward all "--xyz" options from our own command line
    args = [sys.argv[0]] + [arg for arg in sys.argv[1:]
                            if arg.startswith('--')]

    # "start" failure is fatal, "iplist refresh" failure is not
    for subcommand, fatal in ((['start'], True),
                              (['iplist', 'refresh'], False)):
        ret = run_program_rc(args + subcommand)
        if fatal and ret:
            return ret
    return 0


def alarm_handler(_dummy_signum, _dummy_frame):
    """Signal handler to catch input timeout.

    Raising KeyboardInterrupt aborts the blocking input() call in
    input_timeout().
    """
    raise KeyboardInterrupt


def input_timeout(prompt, timeout):
    """Ask something from user with timeout.

    Returns the user's answer, or '' if the timeout expired.
    """
    # SIGALRM fires after "timeout" seconds; alarm_handler() turns it
    # into KeyboardInterrupt which aborts the blocking input()
    signal.signal(signal.SIGALRM, alarm_handler)
    signal.alarm(timeout)
    try:
        answer = input(prompt)
    except KeyboardInterrupt:
        print()
        print('Timeout')
        answer = ''
    finally:
        signal.alarm(0)  # Cancel any pending alarm
    return answer


def command_try_reload():
    """Reload, ask confirmation, optionally revert to previous rules.

    Revert is not fail safe. Things can break if:
    - Zone is added/deleted to config and NetworkManager sends new
      "change to zone" request via D-Bus. This happens only if user
      reconfigures NetworkManager or attach ethernet cable or similar.
    - Needed entry is removed from iplist config and foomuuri-iplist.timer
      updates iplist contents. There is a long expiry time for entries so
      there is plenty of time to fix config.

    It is recommended to fix config immediately and run "foomuuri reload" or
    "foomuuri try-reload".

    Returns 0 if accepted, reload's error code if reload failed, or
    nft's revert exit code + 2 when reverting.
    """
    # Get copy of currently active ruleset
    minimal_config()
    old_rules = run_program_pipe(CONFIG['_nft_bin'] +
                                 ['list', 'table', 'inet', 'foomuuri'], None)
    if not old_rules or not old_rules.startswith('table inet foomuuri {'):
        fail('Foomuuri is not running, use "foomuuri start" instead')

    # Load new ruleset
    error = command_reload()
    if error:
        return error

    # Verify from user if ruleset is ok
    try:
        timeout = int(CONFIG['try-reload_timeout'])
    except ValueError:
        timeout = 15  # Fall back to default if config value is not an int
    print(f'You have {timeout} seconds to accept the changes.')
    answer = input_timeout('Can you establish NEW connections (yes/no)? ',
                           timeout)
    if answer.lower().startswith('y'):
        return 0

    # Revert to old ruleset and return errorcode 2
    warning('Revert to old config')

    # Delete new table first ("delete" after no-op declaration so it works
    # even if table is missing), then restore the saved copy
    save_file(CONFIG['_good_file'], [
        'table inet foomuuri',
        'delete table inet foomuuri',
        '',
        old_rules])
    return run_program_rc(CONFIG['_nft_bin'] +
                          ['--file', CONFIG['_good_file']]) + 2


def seconds_to_human(seconds):
    """Convert seconds int to human readable "19 days, 18:37" format."""
    minutes, second = divmod(seconds, 60)
    hours, minute = divmod(minutes, 60)
    day, hour = divmod(hours, 24)
    clock = f'{hour:02d}:{minute:02d}:{second:02d}'
    # Day count is shown only when non-zero
    return f'{day} days, {clock}' if day else clock


def monitor_state_command(targets, groups, cfg, grouptarget, name):
    """Run command if group/target state changes.

    Logs the up/down transition together with how long the previous state
    lasted and (for targets) consecutive/history statistics, then runs
    the configured external command via monitor_run_command().
    grouptarget is either 'group' or 'target'.
    """
    # pylint: disable=too-many-locals

    # Log state change
    updown = 'up' if cfg['state'] else 'down'
    now = time.time()
    prev = cfg.get('state_time')  # Missing on very first state change
    history = None
    if prev:
        seconds = int(now - prev + 0.5)
        extra = f'previous change was {seconds_to_human(seconds)} ago'
        if grouptarget == 'target':
            # Count how many newest history entries match the new state
            for cons in range(0, cfg['history_size']):
                if cfg['history'][-1 - cons] != cfg['state']:
                    break
            historycount = cfg['history'].count(cfg['state'])
            extra = (f'{extra}, consecutive_{updown} {cons}, '
                     f'history_{updown} {historycount}')
            # "." marks an up entry, "!" a down entry, oldest first
            history = ''.join('.' if item else '!' for item in cfg['history'])
    else:
        extra = 'startup change'
    verbose(f'{grouptarget} {name} changed state to {updown}, {extra}', 0)
    if history:
        verbose(f'{grouptarget} {name} history: {history}', 0)
    # Remember change time; this also restarts the down_interval timer
    cfg['state_time'] = cfg['last_down_interval'] = now
    monitor_run_command(targets, groups, cfg, grouptarget, name,
                        updown, extra, history)


def monitor_run_command(targets, groups, cfg, grouptarget, name,
                        updown, extra, history):
    """Run monitor's external command if configured.

    It will receive current state change event info and all states in
    environment variables. updown selects which command to run:
    'up', 'down' or 'down_interval'.
    """
    # pylint: disable=too-many-arguments
    # pylint: disable=too-many-positional-arguments
    command = cfg[f'command_{updown}']
    if not command:
        return

    env = {
        # Change state event
        'FOOMUURI_CHANGE_TYPE': grouptarget,
        'FOOMUURI_CHANGE_NAME': env_cleanup(name),
        'FOOMUURI_CHANGE_STATE': updown,
        'FOOMUURI_CHANGE_LOG': extra,
        'FOOMUURI_CHANGE_HISTORY': history or '',
        # List of configured targets
        'FOOMUURI_ALL_TARGET': ' '.join(map(env_cleanup, targets)),
        # List of configured groups
        'FOOMUURI_ALL_GROUP': ' '.join(map(env_cleanup, groups)),
    }
    # Current state of every target and group
    env.update(
        (f'FOOMUURI_TARGET_{env_cleanup(target)}',
         'up' if icfg['state'] else 'down')
        for target, icfg in targets.items())
    env.update(
        (f'FOOMUURI_GROUP_{env_cleanup(group)}',
         'up' if icfg.get('state', True) else 'down')
        for group, icfg in groups.items())
    run_program_rc(command, env=env)


def monitor_down_interval(targets, groups, cfg, grouptarget, name):
    """Group/target is still down, run command in regular intervals."""
    now = time.time()
    # Too early for the next reminder?
    if now < cfg['last_down_interval'] + cfg['down_interval']:
        return
    cfg['last_down_interval'] = now
    elapsed = int(now - cfg['state_time'] + 0.5)
    note = f'previous change was {seconds_to_human(elapsed)} ago'
    verbose(f'{grouptarget} {name} is still down, {note}', 0)
    monitor_run_command(targets, groups, cfg, grouptarget, name,
                        'down_interval', note, None)


def monitor_update_groups(targets, groups):
    """Update all group statuses.

    On startup make decision and send event for all groups after first reply
    from any target.

    A group is up while at least one of its targets is up.
    """
    for group, cfg in groups.items():
        any_up = any(targets[target]['state'] for target in cfg['target'])
        # Default of "not any_up" forces a change event on startup,
        # because it is the opposite of the freshly observed state
        state = cfg.get('state', not any_up)  # Undef in startup
        if not any_up:
            if state:  # Change from up to down
                cfg['state'] = False
                monitor_state_command(targets, groups, cfg, 'group', group)
            else:  # Still down, check interval
                monitor_down_interval(targets, groups, cfg, 'group', group)
        elif any_up and not state:  # Change from down to up
            cfg['state'] = True
            monitor_state_command(targets, groups, cfg, 'group', group)


def monitor_update_target(targets, groups, target, state):
    """Add state to target's history and change its state.

    state is True for a successful reply, False for a failure. The
    up/down decision is made from the sliding history window using the
    consecutive_* and history_* thresholds.
    """
    # Add new state to end of history
    cfg = targets[target]
    startup_change = False
    if 'history' not in cfg:
        # First reply ever, fill history and force change event
        cfg['history'] = [True] * cfg['history_size']
        startup_change = True
    cfg['history'] = cfg['history'][1:] + [state]

    # Target state can't change if added state is same as current state
    if cfg['state'] == state and not startup_change:
        if not state:
            # Still down, maybe emit periodic down_interval event
            monitor_down_interval(targets, groups, cfg, 'target', target)
        return

    # Check if target state is changed
    count_up = sum(cfg['history'])  # True entries count as 1
    if cfg['state']:
        # Currently up. Target goes down if:
        # - history has too many downs
        # OR
        # - last n items were down
        if (
                cfg['history_size'] - count_up >= cfg['history_down'] or
                not any(cfg['history'][-cfg['consecutive_down']:])
        ):
            cfg['state'] = False
            monitor_state_command(targets, groups, cfg, 'target', target)
        elif startup_change:  # Always send an event on startup
            monitor_state_command(targets, groups, cfg, 'target', target)

    else:
        # Currently down. Target goes up if:
        # - history has enough ups
        # AND
        # - last n items were up
        if (
                count_up >= cfg['history_up'] and
                all(cfg['history'][-cfg['consecutive_up']:])
        ):
            cfg['state'] = True
            monitor_state_command(targets, groups, cfg, 'target', target)


def monitor_parse_line(targets, groups, target, line):
    """Parse result line from monitor's command.

    Supports generic "OK"/"ERROR" replies and several fping output
    formats. Each recognized line updates the target's up/down state and
    appends a round-trip time in milliseconds (or None on error) to the
    target's statistics.
    """
    verbose(f'target {target}: {line}', 2)
    stat_ms = None  # Value for statistics, assume error

    if line in ('OK', 'ERROR'):
        # Generic "OK" or "ERROR" reply
        state = line == 'OK'
        if state:
            stat_ms = 1  # There is no time, use 1

    elif line.startswith('ICMP Unreachable (Communication Administratively'):
        # fping reply if packet is rejected
        state = False

    elif 'xmt/rcv/%loss' in line:
        # fping with "--squiet"
        if '0/0/0%' in line:
            return  # --squiet is less than --interval, ignore line
        state = '/100%' not in line  # It's up if loss is less than 100%
        if state:
            try:
                # Last "/"-separated field holds the time value
                stat_ms = float(line.split('/')[-1])
            except ValueError:
                pass

    else:
        # fping with "--interval"
        match = re.match(r'^[^ ]+ : \[\d+\], (.+)$', line)
        if not match:
            return  # No match, ignore line
        result = match.group(1)
        # A reply line contains both byte count and time; anything else
        # (e.g. a timeout message) means the target did not answer
        state = ' bytes, ' in result and ' ms (' in result
        if state:
            try:
                stat_ms = float(result.split()[2])
            except ValueError:
                pass

    # Update target state
    monitor_update_target(targets, groups, target, state)

    # Add result to statistics, keeping only the newest statistics_size
    cfg = targets[target]
    cfg['time'] = (cfg['time'] + [stat_ms])[-cfg['statistics_size']:]


def command_option_values(cmd):
    """Quick and dirty parser for "--foo n" and "--foo=n"."""
    parsed = {}
    for pos, token in enumerate(cmd):
        if not token.startswith('--'):
            continue
        key, sep, value = token.partition('=')
        if not sep:
            # "--foo n" form: value is the next argument, if any
            value = cmd[pos + 1] if pos + 1 < len(cmd) else None
        parsed[key] = value
    return parsed


def mangle_fping_command(target, cfg):
    """Mangle fping's command line to contain needed options."""
    cmd = cfg['command']
    if 'fping' not in cmd[0]:
        return cmd  # Not fping, leave command untouched
    options = command_option_values(cmd)
    squiet = options.get('--squiet', '')
    interval = options.get('--interval', '')

    # --squiet works even if interface is down or missing. Automatically
    # use it if missing (1s) or --interval=n000 is specified.
    # Use plain --interval if value is not exact second.
    inject = []
    if not squiet and not interval:  # Neither given, default to 1s / 1000ms
        inject = ['--squiet=1', '--interval=1000']
    elif squiet and not interval:  # Derive interval from squiet
        inject = [f'--interval={squiet}000']
    elif not squiet and interval.endswith('000'):  # Derive squiet
        inject = [f'--squiet={interval[:-3]}']
    elif squiet and interval:  # Both given, only sanity-check them
        try:
            if int(squiet) * 1000 != int(interval):
                warning(f'Mismatching "--squiet" and "--interval" values in '
                        f'target "{target}": {" ".join(cmd)}')
        except ValueError:
            pass
    if inject:
        cmd = cmd[:1] + inject + cmd[1:]

    # Add --loop if missing to get continuous reports
    if '--loop' not in options:
        cmd = cmd[:1] + ['--loop'] + cmd[1:]

    return cmd


def monitor_start_targets(targets, groups):
    """Start pinging all targets.

    Starts the monitoring command for every target that is not already
    running and not in its 30 second restart-delay window. A target whose
    command cannot be started is marked down immediately.
    """
    started_something = False
    for target, cfg in targets.items():
        # Check if target is already running, or still waiting for restart
        if cfg.get('proc') or cfg['proc_restart'] > time.time():
            continue

        # Assume it is up on first startup
        if 'state' not in cfg:
            cfg['state'] = True

        # Start command (pylint: disable=consider-using-with)
        cmd = mangle_fping_command(target, cfg)
        verbose(f'target {target} command: {" ".join(map(str, cmd))}', 2)
        try:
            # stderr is merged to stdout so error output is parsed too
            cfg['proc'] = subprocess.Popen(cmd,
                                           stdin=subprocess.DEVNULL,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT,
                                           encoding='utf-8')
            verbose(f'target {target} monitoring command started')
            started_something = True
        except OSError as error:
            # Couldn't start (e.g. command missing), retry in 30 seconds
            verbose(f'target {target} monitoring command failed: {error}', 0)
            cfg['proc'] = None
            cfg['proc_restart'] = time.time() + 30

            # Mark it as down immediately
            cfg['history'] = [False] * cfg['history_size']
            monitor_update_target(targets, groups, target, False)

    # Small initial sleep so that first read_target() will receive more
    # replies in one go. This helps to get more "target up" events before
    # "group up" events.
    if started_something:
        time.sleep(0.5)


def monitor_close_target(targets, groups, target):
    """Target monitoring command died. Mark it as down."""
    verbose(f'target {target} monitoring command died, restarting it in 30s',
            0)
    cfg = targets[target]

    # Reap the dead process, clear statistics and schedule a restart
    proc = cfg['proc']
    proc.terminate()
    proc.wait(timeout=0.1)
    cfg['proc'] = None
    cfg['proc_restart'] = time.time() + 30
    cfg['time'] = []

    # Force whole history to down so the state flips immediately
    cfg['history'] = [False] * cfg['history_size']
    monitor_update_target(targets, groups, target, False)


def monitor_read_targets(targets, groups):
    """Read incoming lines from all targets.

    Waits up to 10 seconds for output from any monitoring command, feeds
    received lines to the parser and closes dead commands (EOF on their
    pipe). Finally all group states are recalculated.
    """
    # (Re)Start targets if not running
    monitor_start_targets(targets, groups)

    # Map every stdout pipe to its target name. This gives direct lookup
    # after select() instead of scanning all targets per readable pipe,
    # and avoids reusing one variable for both the pipe list and the
    # loop variable.
    pipes = {cfg['proc'].stdout: target
             for target, cfg in targets.items() if cfg['proc']}

    # Wait for incoming lines from any client, up to 10 seconds
    readable, _, _ = select.select(list(pipes), [], [], 10.0)

    # Read one line from every pipe that has data; empty read means EOF,
    # i.e. the monitoring command died
    for pipe in readable:
        target = pipes[pipe]
        line = pipe.readline()
        if line:
            monitor_parse_line(targets, groups, target, line.strip())
        else:
            monitor_close_target(targets, groups, target)

    # Update all groups
    monitor_update_groups(targets, groups)


def monitor_terminate_targets(targets):
    """Terminate all subprocesses.

    terminate() is sent to every process first so they all shut down in
    parallel before the short per-process waits.
    """
    procs = [cfg['proc'] for cfg in targets.values() if cfg['proc']]
    for proc in procs:
        proc.terminate()
    for proc in procs:
        proc.wait(timeout=0.1)


def parse_config_targets(config):
    """Parse "target foo { ... }" entries from config.

    Consumes (pops) every "target xyz" section from config and returns
    a dict of target name to its settings. Config errors are fatal.
    """
    targets = {}
    for section in [key for key in config if key.startswith('target ')]:
        name = section[7:]  # Strip "target " prefix
        cfg = targets[name] = {
            'command': [],
            'command_up': [],
            'command_down': [],
            'command_down_interval': [],
            'consecutive_up': 20,    # last n were UP        => UP
            'consecutive_down': 10,  # last n were DOWN      => DOWN
            'history_up': 80,        # count of UPs => n     => UP
            'history_down': 30,      # count of DOWNs >= n   => DOWN
            'history_size': 100,
            'statistics_size': 300,     # Keep last 300 results
            'statistics_interval': 60,  # Write it once a minute
            'down_interval': 600,    # How often to run command_down_interval

            # Internal items, don't set in config:
            # 'state': True,         # Up or down
            # 'state_time': time(),  # Timestamp for last state change
            # 'last_down_interval': time(),  # Still down timestamp
            # 'history': [True] * history_size,
            # 'proc': Popen()
            'proc_restart': 0,       # Start immediately
            'time': [],              # Statistics
        }
        fileline = ''
        for fileline, line in config.pop(section):
            keyword = line[0]
            if keyword not in cfg:
                fail(f'{fileline}Unknown keyword: {" ".join(line)}')
            if isinstance(cfg[keyword], list):
                # List-valued keywords take the rest of the line as-is
                cfg[keyword] = line[1:]
                continue
            try:
                cfg[keyword] = int(' '.join(line[1:]))
            except ValueError:
                fail(f'{fileline}Invalid value: {" ".join(line)}')

        # Verify section
        if not cfg['command']:
            fail(f'{fileline}Missing "command" keyword in "target {name}"')
        for key in ('consecutive_up', 'consecutive_down',
                    'history_up', 'history_down'):
            if cfg[key] > cfg['history_size']:
                fail(f'{fileline}{key} is larger than history_size in '
                     f'"target {name}": {cfg[key]} > {cfg["history_size"]}')
        if cfg['history_size'] - cfg['history_up'] >= cfg['history_down']:
            warning(f'{fileline}Possible up-down loop in '
                    f'"target {name}": history_size {cfg["history_size"]} - '
                    f'history_up {cfg["history_up"]} >= '
                    f'history_down {cfg["history_down"]}')

    return targets


def parse_config_groups(config, targets):
    """Parse "group foo { ... }" entries from config.

    Consumes (pops) every "group xyz" section from config and returns a
    dict of group name to its settings. Every listed target must exist
    in targets; config errors are fatal.
    """
    groups = {}
    for section in [key for key in config if key.startswith('group ')]:
        name = section[6:]  # Strip "group " prefix
        cfg = groups[name] = {
            'target': [],
            'command_up': [],
            'command_down': [],
            'command_down_interval': [],
            'down_interval': 600,    # How often to run command_down_interval
        }
        fileline = ''
        for fileline, line in config.pop(section):
            keyword = line[0]
            if keyword not in cfg:
                fail(f'{fileline}Unknown keyword: {" ".join(line)}')
            if isinstance(cfg[keyword], list):
                # List-valued keywords take the rest of the line as-is
                cfg[keyword] = line[1:]
                continue
            try:
                cfg[keyword] = int(' '.join(line[1:]))
            except ValueError:
                fail(f'{fileline}Invalid value: {" ".join(line)}')

        # Verify section
        if not cfg['target']:
            fail(f'{fileline}Missing "target" keyword in "group {name}"')
        for item in cfg['target']:
            if item not in targets:
                fail(f'{fileline}Undefined target "{item}" in "group {name}"')

    return groups


def monitor_write_statistics(targets):
    """Write statistics file.

    Dumps per-target thresholds, current state/history and recent fping
    times to the monitor statistics file.
    """
    stats = {}
    for target, cfg in targets.items():
        stats[target] = {
            # Config
            'consecutive_up': cfg['consecutive_up'],
            'consecutive_down': cfg['consecutive_down'],
            'history_up': cfg['history_up'],
            'history_down': cfg['history_down'],
            # State
            'state': cfg['state'],
            'history': cfg.get('history', []),
            # fping time
            'time': cfg['time'],
        }
    save_file(CONFIG['_monitor_statistics_file'], stats)


def command_monitor():
    """Monitor targets and run command when their state changes up or down.

    Runs as a daemon. The outer loop re-reads configuration after SIGHUP
    (keep_going set to 2) and exits cleanly on SIGINT (keep_going 0).
    Exits immediately with 0 if no targets are configured.
    """
    CONFIG['keep_going'] = 1
    while CONFIG['keep_going']:
        # Read minimal config
        config = minimal_config()
        targets = parse_config_targets(config)
        groups = parse_config_groups(config, targets)

        # Exit silently with OK if no targets defined in configuration
        if not targets:
            if HAVE_NOTIFY:
                notify('READY=1')
                notify('STOPPING=1')
            return 0
        daemonize()

        # Define reload/stop signal handler
        def signal_handler(sig, _dummy_frame):
            # SIGINT stops the daemon; the other handled signal (SIGHUP)
            # restarts the outer loop to reload configuration
            if sig == signal.SIGINT:
                CONFIG['keep_going'] = 0
                if HAVE_NOTIFY:
                    notify('STOPPING=1')
            else:
                CONFIG['keep_going'] = 2
                if HAVE_NOTIFY:
                    notify('RELOADING=1')

        CONFIG['keep_going'] = 1
        signal.signal(signal.SIGHUP, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        save_file(CONFIG['_run_dir'] / 'foomuuri-monitor.pid',
                  [str(os.getpid())])

        # Start monitoring
        verbose('Target monitor ready', 0)
        if HAVE_NOTIFY:
            notify('READY=1')
        # First statistics write happens after interval / 5, later writes
        # at full interval
        stat_interval = min(cfg['statistics_interval']
                            for cfg in targets.values())
        next_stat = time.time() + stat_interval / 5
        while CONFIG['keep_going'] == 1:
            monitor_read_targets(targets, groups)

            # Periodic statistics file update
            if stat_interval and time.time() >= next_stat:
                next_stat += stat_interval
                monitor_write_statistics(targets)

        monitor_terminate_targets(targets)

    return 0


def command_help(error=True):
    """Print command line help.

    With error=True (default) this also calls fail() to exit with an
    error code; error=False prints the help and returns 0.
    """
    helptext = (
        f'Foomuuri {VERSION}',
        '',
        f'Usage: {sys.argv[0]} {{options}} command',
        '',
        'Available commands:',
        '',
        '  start            Load configuration files and generate ruleset',
        '  stop             Remove ruleset',
        '  reload           Same as start, followed by iplist refresh',
        '  try-reload       Same as reload, ask confirmation to keep new '
        'config',
        '  status           Show current status: running, zone-interface '
        'mapping',
        '  check            Verify configuration files',
        '  block            Load "block all traffic" ruleset',
        '  list             List active ruleset',
        '  list zone-zone {zone-zone...}',
        '                   List active ruleset for zone-zone',
        '  list macro       List all known macros',
        '  list macro name {name...}',
        '                   List all macros with specified name or value',
        '  list counter     List all named counters',
        '  list counter name {name...}',
        '                   List named counter with specified name',
        '  iplist list      List entries in all configured iplists',
        '  iplist list name {name...}',
        '                   List entries in named iplist',
        '  iplist add name {timeout} ipaddress {ipaddress...}',
        '                   Add or refresh IP address to iplist',
        '  iplist del name ipaddress {ipaddress...}',
        '                   Delete IP address from iplist',
        '  iplist flush name {name...}',
        '                   Delete all IP addresses from iplist',
        '  iplist refresh   Refresh all iplists now',
        '  iplist refresh name {name...}',
        '                   Refresh named iplists now',
        '  set interface {interface} zone {zone}',
        '                   Change interface to zone',
        '  set interface {interface} zone -',
        '                   Remove interface from all zones',
        '',
        'Available options:',
        '',
        '  --version        Print version',
        '  --verbose        Verbose output',
        '  --quiet          Be quiet',
        "  --force          Force some operations, don't check anything",
        "  --soft           Don't force operations, check more",
        '  --fork           Fork as a background daemon process',
        '  --syslog         Enable syslog logging',
        '  --set=option=value',
        '                   Set config option to value',
        '',
        'Internally used commands:',
        '',
        '  dbus             Start D-Bus daemon',
        '  monitor          Start target monitor daemon',
    )
    print('\n'.join(helptext))
    if error:
        fail()
    return 0


def parse_command_line():
    """Parse command line to CONFIG[command] and CONFIG[parameters]."""
    # Options which adjust a CONFIG counter up or down
    counters = {
        '--verbose': ('verbose', 1),
        '--force': ('force', 1),
        '--fork': ('fork', 1),
        '--syslog': ('syslog', 1),
        '--quiet': ('verbose', -1),
        '--soft': ('force', -1),
    }
    for arg in sys.argv[1:]:
        if arg == '--help':
            CONFIG['command'] = 'help'
        elif arg == '--version':
            print(VERSION)
            sys.exit(0)
        elif arg in counters:
            option, delta = counters[arg]
            CONFIG[option] += delta
        elif arg.startswith('--set='):
            # Requires two "=": --set=option=value
            if arg.count('=') == 1:
                fail(f'Invalid syntax for --set=option=value: {arg}')
            _dummy, option, value = arg.split('=', 2)
            if option not in CONFIG:
                fail(f'Unknown foomuuri{{}} option: {arg}')
            CONFIG[option] = value
        elif not CONFIG['command']:
            # First non-option argument is the command
            CONFIG['command'] = arg
        else:
            # Remaining arguments are the command's parameters
            CONFIG['parameters'].append(arg)
    config_to_pathlib()  # Needed to read config files and for --set=x_dir=y


def run_command():
    """Run CONFIG[command].

    Returns the command handler's exit code.
    """
    command = CONFIG['command']

    # Only some commands can take arguments
    if CONFIG['parameters'] and command not in (
            'list', 'iplist', 'set', 'help'):
        return command_help()

    # Help doesn't need root
    if not command or command == 'help':
        return command_help(error=False)

    # Initialize syslogging
    if CONFIG['syslog']:
        syslog.openlog('foomuuri', syslog.LOG_PID, syslog.LOG_DAEMON)

    # Warning if not running as root
    CONFIG['root_power'] = not os.getuid()
    if not CONFIG['root_power']:
        warning('Foomuuri should be run as "root"')

    # Dispatch table from command name to handler function
    handler = {
        'start': command_start,
        'stop': command_stop,
        'reload': command_reload,
        'try-reload': command_try_reload,
        'status': command_status,
        'check': command_start,  # "check" reuses the start handler
        'block': command_block,
        'list': command_list,
        'iplist': command_iplist,
        'set': command_set,
        'dbus': command_dbus,
        'monitor': command_monitor,
    }.get(command)
    if not handler:
        fail(f'Unknown command: {command}')
    return handler()


def main():
    """Parse command line and run command.

    Returns the process exit code.
    """
    parse_command_line()
    return run_command()


# Script entry point: exit with main()'s return code
if __name__ == '__main__':
    try:
        sys.exit(main())
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown
        os.dup2(os.open(os.devnull, os.O_WRONLY), sys.stdout.fileno())
        sys.exit(1)
