#!/usr/bin/python3
# pylint: disable=too-many-lines

"""Foomuuri - Multizone bidirectional nftables firewall.

Copyright 2023, Kim B. Heino, Foobar Oy <b@bbbs.net>

License: GPL-2.0-or-later
"""

import datetime
import ipaddress
import itertools
import json
import os
import pathlib
import re
import select
import shlex
import signal
import socket
import subprocess
import sys
import time
import unicodedata
import dbus
import dbus.mainloop.glib
import dbus.service
import requests
from gi.repository import GLib


# SystemD notify support is optional: fall back gracefully when the
# python3-systemd bindings are not installed.
try:
    from systemd.daemon import notify
    HAVE_NOTIFY = True  # notify() can be used for sd_notify status updates
except ImportError:
    HAVE_NOTIFY = False


# Program version, reported on the command line and over D-Bus.
VERSION = '0.22'

# Global runtime configuration. The defaults below are overridden by the
# foomuuri{} section of the config files and by command line parsing.
CONFIG = {
    # Parsed foomuuri{} from config files
    'log_rate': '1/second burst 3',
    'log_input': 'yes',
    'log_output': 'yes',
    'log_forward': 'yes',
    'log_rpfilter': 'yes',
    'log_invalid': 'no',
    'log_smurfs': 'no',
    'log_level': 'level info',
    'localhost_zone': 'localhost',
    'dbus_zone': 'public',
    'rpfilter': 'yes',
    'counter': 'no',
    'set_size': '65535',
    'recursion_limit': '65535',
    'priority_offset': '5',
    'dbus_firewalld': 'no',
    'nft_bin': 'nft',

    # Directories and files. Files are relative to state_dir.
    # config_to_pathlib() later converts these strings to pathlib.Paths.
    'etc_dir': '/etc/foomuuri',
    'share_dir': '/usr/share/foomuuri',
    'state_dir': '/var/lib/foomuuri',
    'run_dir': '/run/foomuuri',
    'good_file': 'good.fw',
    'next_file': 'next.fw',
    'dbus_file': 'dbus.fw',
    'resolve_file': 'resolve.fw',
    'iplist_file': 'iplist.fw',
    'iplist_manual_file': 'iplist-manual.fw',
    'zone_file': 'zone',

    # Parsed command line parameters - used internally
    'command': '',
    'parameters': [],
    'devel': False,
    'verbose': 0,
}

OUT = []       # Generated nftables ruleset / commands
LOGRATES = {}  # Lograte names and limits
HELPERS = []   # List of helpers: (helper-object, protocol, ports)


def fail(error=None):
    """Terminate the program with exit status 1.

    An optional error message is printed first.
    """
    if error:
        print(f'Error: {error}')
    sys.exit(1)


def verbose(line):
    """Print line only when --verbose was given on the command line."""
    if not CONFIG['verbose']:
        return
    print(line, flush=True)


def out(line):
    """Append one line to the generated nftables ruleset buffer."""
    OUT.append(line)


def run_program_rc(args, *, env=None):
    """Run external program and return its errorcode. Print its output.

    Empty args is a no-op returning 0. OS level failures (e.g. missing
    binary) print an error message and return 1.
    """
    if not args:
        return 0
    try:
        result = subprocess.run(
            args, check=False, stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT, encoding='utf-8', env=env, timeout=60)
    except OSError as error:
        print(f'Error: Failed to run command "{" ".join(args)}": {error}',
              flush=True)
        return 1
    text = result.stdout.rstrip()
    if text:
        print(text, flush=True)
    return result.returncode


def run_program_output(args, fileline, shell=True):
    """Run external program and return its output. Failure is fatal.

    ``args`` may be a shell command string (the default, shell=True, as
    used by shell_expansion) or an argv list.  Any OS error or non-zero
    exit status aborts via fail(), prefixing the message with ``fileline``.
    """
    if not args:
        return ''
    # Build the printable command once: " ".join() over a str would
    # space-separate its characters, so only join real argv lists.
    command = args if isinstance(args, str) else ' '.join(args)
    try:
        proc = subprocess.run(args, check=False, stdout=subprocess.PIPE,
                              stderr=subprocess.STDOUT, encoding='utf-8',
                              shell=shell, timeout=60)
    except OSError as error:
        fail(f'{fileline}Failed to run command "{command}": {error}')
    if proc.returncode:
        fail(f'{fileline}Failed to run command "{command}": '
             f'return code {proc.returncode}')
    return proc.stdout.rstrip()


def run_program_json(args):
    """Quietly run external program and return its output as json.

    Returns {} for empty args, None on any failure (exec error, non-zero
    exit, unparseable output).
    """
    if not args:
        return {}
    try:
        completed = subprocess.run(args, check=False,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT,
                                   encoding='utf-8', timeout=60)
    except OSError:
        return None
    if completed.returncode != 0:
        return None
    try:
        return json.loads(completed.stdout)
    except json.decoder.JSONDecodeError:
        return None


def shell_expansion(content, fileline):
    """Expand $(shell command) in configuration file.

    This is the first expansion done. Failure in command is fatal.
    """
    marker = '$(shell '
    while marker in content:
        before, rest = content.split(marker, 1)
        if ')' not in rest:
            rest = rest.splitlines()[0]
            fail(f'{fileline}"$(shell" without ")" in command: {rest}')
        command, after = rest.split(')', 1)
        content = f'{before}{run_program_output(command, fileline)}{after}'
    return content


def read_config():
    """Read all config files to config dict: section -> lines[].

    Files are read in alphabetical order, ignoring backup and hidden files.
    Returns dict: section name -> list of (fileline, tokens), plus special
    key '_section_line' mapping section names to "File x line y: " prefixes
    for error messages.
    """
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    # pylint: disable=no-member  # rglob
    configfiles = (sorted(list(CONFIG['share_dir'].rglob('*.conf'))) +
                   sorted(list(CONFIG['etc_dir'].rglob('*.conf'))))
    configfiles = [name for name in configfiles
                   if name.name[0] not in ('.', '#')]

    # These characters will combine to single word in shlex
    wordchars = ''.join(chr(letter) for letter in range(33, 256)
                        # excludes: " # ' ; { }
                        if letter not in (34, 35, 39, 59, 123, 125))

    # Read all config files
    config = {}        # Final config dict
    section = None     # Currently open section name
    section_line = {}  # Section_name -> filename_line for error messages
    for filename in configfiles:
        try:
            content = filename.read_text(encoding='utf-8')
        except PermissionError as error:
            fail(f'File {filename}: Can\'t read: {error}')

        # Expand $(shell in configuration file. Do this for whole file instead
        # of single line so that command can return multiple lines.
        content = shell_expansion(content, f'File {filename}: ')

        # Parse single config file content
        continuation = ''
        for linenumber, line in enumerate(content.splitlines()):
            # Combine lines if there is \ at end of line
            if line.endswith('\\'):
                continuation += line[:-1] + ' '
                continue
            line = continuation + line
            continuation = ''

            # Parse single line to list of words. Keep " as is, it can be
            # used to avoid macro expansion.
            fileline = f'File {filename} line {linenumber + 1}: '
            try:
                lexer = shlex.shlex(line, punctuation_chars=';{')
                lexer.wordchars = wordchars
                tokens = list(lexer)
            except ValueError as error:
                fail(f'{fileline}Can\'t parse line: {error}')
            if not tokens:
                continue

            # "}" is end of section
            if len(tokens) == 1 and tokens[0] == '}':  # End of section
                if not section:
                    fail(f'{fileline}Extra "}}"')
                section = None

            # "foo {" is section start
            elif len(tokens) == 2 and tokens[1] == '{':
                if section:
                    fail(f'{fileline}New "{" ".join(tokens)}" while section '
                         f'"{section}" is still open')
                section = tokens[0]
                if section.startswith('_'):  # _name is protected
                    fail(f'{fileline}Unknown section: {section}')
                if section not in config:
                    config[section] = []
                    section_line[section] = fileline

            # "template foo {" / "target foo" / "group foo" is section start
            elif (
                    len(tokens) == 3 and
                    tokens[0] in ('template', 'target', 'group') and
                    tokens[2] == '{'
            ):
                if section:
                    fail(f'{fileline}New "{" ".join(tokens)}" while section '
                         f'"{section}" is still open')
                section = f'{tokens[0]} {tokens[1]}'
                if section not in config:
                    config[section] = []
                    section_line[section] = fileline

            # "foo" which is not inside section
            elif not section:
                fail(f'{fileline}Unknown line: {" ".join(tokens)}')

            # "foo" inside section
            else:
                config[section].append((fileline, tokens))

        # End of file checks
        if continuation:
            fail(f'File {filename}: Continuation "\\" at end of file')
        if section:
            fail(f'File {filename}: Section "{section}" is missing "}}" at '
                 f'end of file')

    # Include section_name -> filename_line to config for error messages
    config['_section_line'] = section_line
    return config


def config_to_pathlib():
    """Convert str paths in CONFIG{} to pathlib.Paths.

    Directories become Paths first so that the *_file entries can then be
    anchored under state_dir.
    """
    for key in list(CONFIG):
        if key.endswith('_dir'):
            CONFIG[key] = pathlib.Path(CONFIG[key])
    for key in list(CONFIG):
        if key.endswith('_file'):
            CONFIG[key] = CONFIG['state_dir'] / CONFIG[key]


def parse_config_macros(config):
    """Parse macro{} from config. Recursively expand macro in macro{}.

    Returns dict: macro name -> list of replacement words. A later
    definition overwrites an earlier one unless it starts with "+",
    which appends.
    """
    # Parse macro{} to dict
    macros = {}
    macroline = {}
    for fileline, macro in config.pop('macro', []):
        key = macro[0]
        value = macro[1:]
        macroline[key] = fileline
        # Guard against a bare "macro-name" line with no value: treat it
        # as an empty macro instead of crashing on value[0].
        if value and value[0] == '+':  # append
            macros[key] = macros.get(key, []) + value[1:]
        else:
            macros[key] = value  # overwrite (possibly with empty list)

    # Expand macro in macro{}. Keep going as long as there was some expansion
    # done.
    while True:
        found = False
        for check, cvalue in macros.items():
            for macro, value in macros.items():
                try:
                    pos = value.index(check)
                except ValueError:
                    continue
                if check == macro:  # Macro "foo" expands to "foo bar"
                    fail(f'{macroline[macro]}Macro "{macro}" expands to '
                         f'itself: {" ".join(value)}')
                # Expand macro
                macros[macro] = value[:pos] + cvalue + value[pos + 1:]
                found = True
        if not found:  # No new expansion was done
            return macros


def expand_single_line(fileline, line, macros):
    """Expand first matching macro in line. Repeat call to expand all.

    This can result to multiple lines. Return None if no expansion was done.
    """
    for name, replacement in macros.items():
        if name not in line:
            continue

        # Macro found: splice its replacement in, splitting on ";" into
        # separate lines.
        pos = line.index(name)
        head = line[:pos]
        tail = line[pos + 1:]
        result = []
        for is_separator, words in itertools.groupby(
                replacement, lambda word: word == ';'):
            if not is_separator:
                result.append((fileline, head + list(words) + tail))
        return result
    return None


def expand_macros(config):
    """Expand all macros in all config sections."""
    macros = parse_config_macros(config)
    for section, pending in config.items():
        # Macros are never expanded in these sections
        if section in ('foomuuri', 'zone') or section.startswith('_'):
            continue

        done = []
        while pending:
            fileline, line = pending.pop(0)
            result = expand_single_line(fileline, line, macros)
            if result:
                # Expansion happened: re-scan the produced lines first so
                # nested macros get expanded too
                pending = result + pending
            else:
                done.append((fileline, line))
        config[section] = done


def remove_quotes(config):
    """Change "foo" to foo in config entries.

    This is called after macro expansion so that '"ssh"' is 'ssh', not
    'tcp 22'.
    """
    for section, lines in config.items():
        if section.startswith('_'):  # Internal housekeeping keys
            continue
        for _fileline, words in lines:
            for pos, word in enumerate(words):
                # Strip matching surrounding quotes, single or double
                if word and word[0] == word[-1] and word[0] in '"\'':
                    words[pos] = word[1:-1]


def parse_config_foomuuri(config):
    """Parse foomuuri{} from config to CONFIG{}."""
    for fileline, line in config.pop('foomuuri', []):
        name = line[0]
        value = ' '.join(line[1:])
        if name not in CONFIG:
            fail(f'{fileline}Unknown foomuuri{{}} option: {" ".join(line)}')
        if value.startswith('+ '):  # append
            CONFIG[name] = f'{CONFIG[name]} {value[2:]}'
        else:
            CONFIG[name] = value  # overwrite
    config_to_pathlib()

    # Convert chain priority offset to nft. It is already converted on D-Bus
    # handler reload.
    offset = CONFIG['priority_offset']
    if not offset:
        priority = 0
    else:
        try:
            priority = int(offset.replace(' ', ''))
        except ValueError:
            fail(f'Invalid foomuuri{{}} priority_offset: '
                 f'{CONFIG["priority_offset"]}')
    if priority > 0:
        CONFIG['priority_offset'] = f' + {priority}'
    elif priority < 0:
        CONFIG['priority_offset'] = f' - {-priority}'
    else:
        CONFIG['priority_offset'] = ''

    # Add "packets" to log rates
    for key in list(CONFIG):
        if (key.startswith('log_') and
                re.match(r'^\d+/(second|minute|hour) burst \d+$',
                         CONFIG[key])):
            CONFIG[key] += ' packets'


def parse_config_zones(config):
    """Parse zone{} from config.

    Returns dict: zone name -> {'interface': [names...]}.
    """
    zones = {}
    for fileline, words in config.pop('zone', []):
        name = words[0]
        if name in zones:
            fail(f'{fileline}Zone is already defined: {name}')
        zones[name] = {'interface': words[1:]}
    return zones


def parse_config_zonemap(config):
    """Parse zonemap{} rules from config."""
    zonemap = []
    for fileline, line in config.pop('zonemap', []):
        rule = parse_rule_line((fileline, line))
        # A zonemap entry must rewrite at least one zone to be useful
        if not (rule['new_dzone'] or rule['new_szone']):
            fail(f'{fileline}Zonemap without "new_dzone" or "new_szone" '
                 f'is a no-op: {" ".join(line)}')
        zonemap.append(rule)
    return zonemap


def parse_config_rule_section(config, section):
    """Parse snat{}, dnat{}, prerouting{} etc. rules from config."""
    return [parse_rule_line(entry) for entry in config.pop(section, [])]


def parse_resolve(config, section, timeout=None, refresh=None):
    """Parse resolve{} and iplist{} from config.

    Line syntax:
      @name fqdn fqdn2 fqdn3               # resolve
      @name url url2 filename filename2    # iplist

    Entry "timeout 10h 30m" (defaults to 24h if "resolve", 10d if "iplist")
    is how long found IP addresses are remembered.

    Entry "refresh 3h" is how often iplist entries will be fetched.
    """
    ret = {
        'timeout': timeout,
        'refresh': refresh,
    }
    for fileline, line in config.pop(section, []):
        key = line[0]
        if key in ('timeout', 'refresh') and len(line) > 1:
            ret[key] = ''.join(line[1:])
            continue
        if not key.startswith('@') or len(key) == 1:
            fail(f'{fileline}Bad {section} name: {" ".join(line)}')
        for ipv in (4, 6):  # "@foo" to "@foo_4" and "@foo_6"
            name = f'{key}_{ipv}'
            if len(line) == 1 or line[1:] == ['-']:
                ret[name] = []  # clear
            elif line[1] == '+':  # append
                ret[name] = ret.get(name, []) + line[2:]
            else:
                ret[name] = line[1:]  # overwrite
    return ret


def parse_config_hook(config):
    """Parse hook{} from config."""
    known_hooks = ('pre_start', 'post_start', 'pre_stop', 'post_stop')
    for fileline, line in config.pop('hook', []):
        if line[0] not in known_hooks:
            fail(f'{fileline}Unknown hook: {" ".join(line)}')
        # Store hook command (as word list) directly in CONFIG
        CONFIG[line[0]] = line[1:]


def minimal_config():
    """Read and parse minimal config.

    Reads all config files, expands macros, strips quotes and fills
    CONFIG{} from the foomuuri{} section. Returns the remaining config
    sections for callers that need to parse more.
    """
    config = read_config()
    expand_macros(config)
    remove_quotes(config)
    parse_config_foomuuri(config)
    return config


def is_ipv4_address(value):
    """Is value IPv4 address, network or interval."""
    if value.count('-') == 1:  # Interval "IP-IP"
        start, end = value.split('-')
        return is_ipv4_address(start) and is_ipv4_address(end)
    try:  # Address "IP"
        return ipaddress.ip_address(value).version == 4
    except ValueError:
        pass
    try:  # Network "IP/mask"
        return ipaddress.ip_network(value).version == 4
    except ValueError:
        return False


def is_ipv6_address(value):
    """Is value IPv6 address, network or interval."""
    if value.count('/-') == 1:  # Suffix mask "IP/-mask"
        addr, _sep, maskstr = value.partition('/-')
        try:
            masklen = int(maskstr)
        except ValueError:
            return False
        return 0 <= masklen <= 128 and is_ipv6_address(addr)

    if value.count('-') == 1:  # Interval "IP-IP"
        start, end = value.split('-')
        return is_ipv6_address(start) and is_ipv6_address(end)

    # Python's ipaddress library doesn't handle "[ipv6]" notation.
    # Strip [] before validating the address. nft handles [] fine, it will
    # strip them.
    if value.startswith('['):
        if value.endswith(']'):  # "[ipv6]"
            value = value[1:-1]
        elif ']/' in value:      # "[ipv6]/56"
            value = value[1:].replace(']/', '/')

    try:  # Address "IP"
        return ipaddress.ip_address(value).version == 6
    except ValueError:
        pass
    try:  # Network "IP/mask"
        return ipaddress.ip_network(value).version == 6
    except ValueError:
        return False


def is_ip_address(value):
    """Check if value is IPv4 or IPv6 address.

    Return 4, 6, or 0 if not detected.
    """
    # Leading "-" marks negation; actual handling is in single_or_set()
    candidate = value[1:] if value.startswith('-') else value
    if is_ipv4_address(candidate):
        return 4
    if is_ipv6_address(candidate):
        return 6
    return 0


def is_port(value):
    """Check if value is port: "1", "1-2" or "1,2", or any combination."""
    return all(number.isnumeric()
               for item in value.split(',')
               for number in item.split('-'))


def verify_rule_sanity(rule, fileline):
    """Do some basic verify that single rule is valid.

    May normalize rate values in place (appending " packets").
    Invalid combinations abort via fail().
    """
    # These keys must hold a single word
    for key in ('saddr_rate_name', 'daddr_rate_name', 'saddr_daddr_rate_name',
                'helper', 'counter'):
        value = rule[key] or ''
        if ' ' in value:
            fail(f'{fileline}"{key}" must be single word: {value}')

    # Rate values: normalize and validate
    for key in ('global_rate', 'saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
        value = rule[key]
        if not value:
            continue
        if re.match(r'^\d+/(second|minute|hour) burst \d+$', value):
            value = rule[key] = value + ' packets'
        valid = (
            re.match(r'^\d+/(second|minute|hour)( burst \d+ packets)?$',
                     value) or
            re.match(r'^ct count (over )?\d+$', value))
        if not valid:
            fail(f'{fileline}Invalid "{key}" value: {rule[key]}')

    # "extra" options are only meaningful together with their "basic" one
    for basic, extra in (
            ('protocol', 'sport'),
            ('protocol', 'dport'),
            ('saddr_rate', 'saddr_rate_name'),
            ('saddr_rate', 'saddr_rate_mask'),
            ('daddr_rate', 'daddr_rate_name'),
            ('daddr_rate', 'daddr_rate_mask'),
            ('saddr_daddr_rate', 'saddr_daddr_rate_name'),
            ('saddr_daddr_rate', 'saddr_daddr_rate_mask'),
    ):
        if rule[extra] and not rule[basic]:
            fail(f'{fileline}"{extra}" without "{basic}" is not valid')

    if rule['policy'] == 'mark_set' and not rule['mark_set']:
        fail(f'{fileline}"{rule["policy"]}" without value is not valid')

    if rule['policy'] in ('snat', 'dnat') and not rule['to']:
        fail(f'{fileline}"{rule["policy"]}" without "to" is not valid')

    if rule['policy'] == 'masquerade' and rule['to']:
        fail(f'{fileline}"{rule["policy"]}" with "to" is not valid')


def parse_rule_line(fileline_line):
    """Parse single config section line to rule dict.

    This parser is quite relaxed. Words can be in almost any order. For
    example, all following entries are equal:
      tcp 22 log           <- preferred
      tcp 22 accept log
      accept tcp 22 log
      log tcp accept 22

    Takes a (fileline, tokens) tuple and returns a dict with every
    supported rule attribute; unset attributes stay None (False for the
    ipv4/ipv6 flags). Basic sanity is verified before returning.
    """
    # pylint: disable=too-many-branches
    fileline, line = fileline_line
    ret = {
        # Basic rules
        'policy': 'accept',
        'cast': 'unicast',
        'protocol': None,
        'saddr': None,
        'sport': None,
        'daddr': None,
        'dport': None,
        'oifname': None,
        'iifname': None,
        # Is this IPv4/6 specific rule?
        'ipv4': False,
        'ipv6': False,
        # Rate limits
        'global_rate': None,
        'saddr_rate': None,
        'saddr_rate_mask': None,
        'saddr_rate_name': None,
        'daddr_rate': None,
        'daddr_rate_mask': None,
        'daddr_rate_name': None,
        'saddr_daddr_rate': None,
        'saddr_daddr_rate_mask': None,
        'saddr_daddr_rate_name': None,
        # User limits
        'uid': None,
        'gid': None,
        # Zonemap specific rules
        'szone': None,
        'dzone': None,
        'new_szone': None,
        'new_dzone': None,
        # Misc rules
        'to': None,  # snat/dnat to
        'counter': None,
        'helper': None,
        'sipsec': None,
        'dipsec': None,
        'log': None,
        'log_level': None,
        'nft': None,
        'mss': None,
        'template': None,
        'mark_set': None,
        'mark_match': None,

        # Internal housekeeping
        'fileline': fileline,  # For error messages
    }

    # Word-by-word state machine: "keyword" remembers which start keyword
    # is collecting parameters. "" means keyword seen but no value yet;
    # None means no keyword is open.
    keyword = None
    for item in line:
        # "tcp 22" is shortcut for "tcp dport 22"
        if not keyword and is_port(item):
            keyword = 'dport'
            if ret[keyword] is None:
                ret[keyword] = ''

        # First item after start keyword is always a parameter for it, except
        # for "log" or "counter". Log will have good default value if not
        # defined. Counter without parameter will create anonymous counter.
        # NOTE: deliberately "if", not "elif" - the dport shortcut above
        # must fall through here so the port number is consumed as value.
        if keyword and not ret[keyword] and keyword not in ('log', 'counter'):
            ret[keyword] = item
            if keyword == 'protocol':  # Single word only
                keyword = None

        # Non-start keywords
        elif item in ('accept', 'drop', 'return',
                      'masquerade', 'snat', 'dnat',
                      'mark_save', 'mark_restore'):
            ret['policy'] = item
            keyword = None
        elif item == 'reject':
            ret['policy'] = 'reject with icmpx admin-prohibited'
            keyword = None
        elif item in ('multicast', 'broadcast'):
            ret['cast'] = item
            keyword = None
        elif item in ('tcp', 'udp', 'icmp', 'icmpv6', 'igmp', 'esp'):
            # "igmp" and "esp" are for backward compatibility (v0.21)
            ret['protocol'] = item
            keyword = None
        elif item in ('ipv4', 'ipv6'):
            ret[item] = True
            keyword = None
        elif item in ('sipsec', 'dipsec'):
            ret[item] = 'exists'
            keyword = None
        elif item in ('-sipsec', '-dipsec'):
            ret[item[1:]] = 'missing'
            keyword = None

        # Start keywords
        elif item in ('protocol',
                      'saddr', 'sport',
                      'daddr', 'dport',
                      'oifname', 'iifname',
                      'global_rate',
                      'saddr_rate', 'saddr_rate_mask', 'saddr_rate_name',
                      'daddr_rate', 'daddr_rate_mask', 'daddr_rate_name',
                      'saddr_daddr_rate', 'saddr_daddr_rate_mask',
                      'saddr_daddr_rate_name',
                      'uid', 'gid',
                      'szone', 'dzone',
                      'new_szone', 'new_dzone',
                      'to',
                      'counter',
                      'helper',
                      'log',
                      'log_level',
                      'nft',
                      'mss',
                      'template',
                      'mark_set',
                      'mark_match',
                      ):
            keyword = item
            if ret[keyword] is None:
                ret[keyword] = ''
            if item == 'mark_set':  # policy and start keyword
                ret['policy'] = item

        # More parameters for keyword
        elif keyword:
            if ret[keyword]:
                ret[keyword] += ' '
            ret[keyword] += item

        # Unknown word after non-start keyword
        else:
            fail(f'{fileline}Can\'t parse line: {" ".join(line)}')

    verify_rule_sanity(ret, fileline)
    return ret


def parse_config_templates(config):
    """Parse "template foo { ... }" rules from config.

    Returns dict: template name -> list of parsed rules.
    """
    templates = {}
    for section in [key for key in config if key.startswith('template ')]:
        lines = config.pop(section)
        # Strip the "template " prefix to get the bare template name
        templates[section[len('template '):]] = [
            parse_rule_line(line) for line in lines]
    return templates


def parse_config_rules(config):
    """Parse "zone-zone" rules from config.

    All other sections must be already parsed and removed from config.
    """
    rules = {}
    for section, lines in config.items():
        if section.startswith('_'):  # Internal housekeeping keys
            continue
        parts = section.split('-')
        if len(parts) != 2:  # Anything left that isn't "szone-dzone"
            fail(f'{config["_section_line"][section]}Unknown section: '
                 f'{section}')
        rules[tuple(parts)] = [parse_rule_line(line) for line in lines]
    return rules


def insert_single_any(any_rules, rules, szone, dzone):
    """Insert single any_rules to rules[(szone, dzone)].

    Rules are prepended. No-op for empty rules or same-zone pairs.
    """
    if any_rules and szone != dzone:
        rules[(szone, dzone)] = any_rules + rules.get((szone, dzone), [])


def insert_any_zones(zones, rules):
    """Insert "any-zone", "zone-any" and "any-any" rules to "zone-zone" rules.

    These are inserted to beginning of zone-zone rules.
    """
    for zone in zones:
        # "any-zone": applies with every possible source zone
        any_rules = rules.pop(('any', zone), [])
        for szone in zones:
            insert_single_any(any_rules, rules, szone, zone)

        # "zone-any": applies with every possible destination zone
        any_rules = rules.pop((zone, 'any'), [])
        for dzone in zones:
            insert_single_any(any_rules, rules, zone, dzone)

    # "any-any": applies to every zone pair
    any_rules = rules.pop(('any', 'any'), [])
    for szone, dzone in itertools.product(zones, zones):
        insert_single_any(any_rules, rules, szone, dzone)


def expand_templates(rules, templates):
    """Expand rule "template foo" in rules.

    Template content replaces the "template foo" rule in place. A rule
    list growing past recursion_limit aborts as a probable template loop.
    """
    for zonepair in rules:
        index = 0
        while index < len(rules[zonepair]):
            rule = rules[zonepair][index]
            template = rule['template']
            if not template:  # Plain rule, nothing to expand
                index += 1
                continue
            fileline = rule['fileline']

            if template not in templates:
                fail(f'{fileline}Unknown template name: {template}')
            # Splice template content in place of the "template foo" rule.
            # index is NOT advanced so nested templates expand too.
            current = rules[zonepair]
            rules[zonepair] = (current[:index] +
                               templates[template] +
                               current[index + 1:])
            if len(rules[zonepair]) > int(CONFIG['recursion_limit']):
                fail(f'{fileline}Possible template loop: '
                     f'{zonepair[0]}-{zonepair[1]} template {template}')


def verify_config(config, zones, rules):
    """Verify config data.

    Adds the localhost zone if missing and fills in every missing
    zone-zone pair with a "drop log" rule (empty for localhost-localhost,
    which the output_zones vmap treats as accept).
    """
    if not zones:
        fail('No zones defined in section zone{}')

    localhost = CONFIG['localhost_zone']
    if localhost not in zones:
        zones[localhost] = {'interface': []}
        print(f'{config["_section_line"]["zone"]}Warning: Zone "{localhost}" '
              f'is missing from zone{{}}, adding it', flush=True)

    # Localhost zone never has interfaces bound to it
    if zones[localhost]['interface']:
        fail(f'{config["_section_line"]["zone"]}Zone "{localhost}" has '
             f'interfaces "{" ".join(zones[localhost]["interface"])}", '
             f'it must be empty')

    if CONFIG['dbus_zone'] not in zones:
        print(f'Warning: Config option dbus_zone value '
              f'"{CONFIG["dbus_zone"]}" is missing from zone{{}}', flush=True)

    # All zone-zone pairs must be known
    for szone, dzone in rules:
        if szone not in zones or dzone not in zones:
            fileline = config['_section_line'][f'{szone}-{dzone}']
            fail(f'{fileline}Unknown zone-zone: {szone}-{dzone}')

    # Add missing zone-zone pairs, needed for "ct established" return packets
    # and for dynamic interface-to-zone binding via D-Bus.
    # They will have single rule "drop log".
    # If localhost-localhost zone is missing it will be added as empty. Empty
    # is handled as "accept" in output_zones vmap.
    for szone, dzone in itertools.product(zones, zones):
        if (szone, dzone) in rules:
            continue
        if szone == localhost and dzone == localhost:
            rules[(szone, dzone)] = []
        else:
            rules[(szone, dzone)] = [
                parse_rule_line(('', ['drop', 'log']))]


def output_rate_names(rules):
    """Output empty saddr_rate sets to ruleset.

    Anonymous rates get generated "_rate_set_N" names written back into
    their rule dicts; each named set is emitted once for IPv4 and IPv6.
    """
    next_anon = 1
    emitted = set()
    for rulelist in rules.values():
        for rule in rulelist:
            for rate in ('saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
                if not rule[rate]:
                    continue

                # Rule with rate found. It can be pre-named or anonymous.
                setname = rule[f'{rate}_name']
                if setname in emitted:
                    continue  # Pre-named and already added
                if not setname:  # Anonymous - invent a name for it
                    setname = rule[f'{rate}_name'] = f'_rate_set_{next_anon}'
                    next_anon += 1
                emitted.add(setname)

                # Output empty sets for IPv4 and IPv6. These will have
                # one minute timeout (one hour for per-hour rates).
                for ipv in (4, 6):
                    out(f'set {setname}_{ipv} {{')
                    if rate == 'saddr_daddr_rate':
                        out(f'type ipv{ipv}_addr . ipv{ipv}_addr')
                    else:
                        out(f'type ipv{ipv}_addr')
                    out(f'size {CONFIG["set_size"]}')
                    if rule[rate].startswith('ct '):
                        out('flags dynamic')
                    else:
                        timeout = '1h' if 'hour' in rule[rate] else '1m'
                        out('flags dynamic,timeout')
                        out(f'timeout {timeout}')
                    out('}')


def suffix_mask(value, compare):
    """Convert suffix mask "IP/-mask" to nft.

    Builds "& :<mask> <compare><addr>" where the mask keeps the lowest
    "mask" bits of the address.
    """
    compare = compare or '== '
    addr, masklen = value.split('/-')
    hexmask = f'{(1 << int(masklen)) - 1:x}'  # Mask as long hex string
    # Split into ":xxxx" 16-bit groups, least significant first
    groups = ''
    while hexmask:
        groups = f':{hexmask[-4:]}{groups}'
        hexmask = hexmask[:-4]
    return f'& :{groups} {compare}{addr}'


def single_or_set(data, fileline=''):
    """Convert data to single item or set if multiple values.

    `data` is a whitespace-separated string or a list of items. Items
    prefixed with "-" negate the match ("!="); mixing negated and
    plain items is an error. Returns a single nft value or an
    anonymous nft set.
    """
    # Copy list input: "-" prefixes are stripped in-place below and the
    # caller's list must not be modified as a side effect.
    if isinstance(data, list):
        values = list(data)
    else:
        values = data.split()

    # Handle negative: add "!=" to final rule
    neg = ''
    for index, value in enumerate(values):
        if value.startswith('-'):
            if index and not neg:
                fail(f'{fileline}Can\'t mix "+" and "-" items: '
                     f'{" ".join(values)}')
            neg = '!= '
        elif neg:
            fail(f'{fileline}Can\'t mix "+" and "-" items: {" ".join(values)}')
        if neg:
            values[index] = value[1:]

    # Single item; "IP/-mask" suffix masks need special handling
    if len(values) == 1 and ' ' not in values[0]:
        if '/-' in values[0]:
            return suffix_mask(values[0], neg)
        return neg + values[0]

    # Multiple -> anonymous set, sorted for stable output
    return f'{neg}{{ {", ".join(sorted(values))} }}'


def netmask_to_and(masklist, ipv, fileline):
    """Parse "masklist 24 56" rule and return "and 255.255.255.0 " string.

    First value is mask for IPv4 and second is for IPv6.
    """
    if not masklist:
        return ''
    prefixes = [int(token) for token in masklist.split() if token.isnumeric()]
    if len(prefixes) != 2 or prefixes[0] > 32 or prefixes[1] > 128:
        fail(f'{fileline}Invalid rate_mask: {masklist}')

    # Pick the family-specific prefix and expand it to a full netmask
    if ipv == 4:
        network = ipaddress.IPv4Network(f'0.0.0.0/{prefixes[0]}')
    else:
        network = ipaddress.IPv6Network(f'::/{prefixes[1]}')
    return f'and {network.netmask} '


def limit_rate_or_ct(rate):
    """Return "limit rate x" or "ct count x" according to rate."""
    # "ct count"/"ct ..." expressions are passed through unchanged
    prefix = '' if rate.startswith('ct ') else 'limit rate '
    return f'{prefix}{rate} '


def rule_rate_limit(rule, ipv):
    """Return rule's rate limits as nft update-command."""
    if rule['global_rate']:
        return limit_rate_or_ct(rule['global_rate'])

    family = 'ip' if ipv == 4 else 'ip6'
    parts = []
    for rate in ('saddr_rate', 'daddr_rate', 'saddr_daddr_rate'):
        limit = rule[rate]
        if not limit:
            continue
        name = rule[f'{rate}_name']
        mask = rule[f'{rate}_mask']

        # "ct" limits are added once, others update on every packet:
        # "update @foo_4 { ip saddr ... limit rate 3/second } "
        verb = 'add' if limit.startswith('ct ') else 'update'
        part = f'{verb} @{name}_{ipv} {{ '
        if 'saddr' in rate:
            part += f'{family} saddr '
            part += netmask_to_and(mask, ipv, rule['fileline'])
        if rate == 'saddr_daddr_rate':
            part += '. '  # Concatenated saddr . daddr key
        if 'daddr' in rate:
            part += f'{family} daddr '
            part += netmask_to_and(mask, ipv, rule['fileline'])
        part += f'{limit_rate_or_ct(limit)}}} '
        parts.append(part)
    return ''.join(parts)


def mark_set_argument(rule):
    """Convert "x" to "x", "x/y" to "mark and ~y or x"."""
    value = rule['mark_set']
    mark, slash, mask_text = value.partition('/')
    if '/' in mask_text:  # More than one "/" is invalid
        fail(f'{rule["fileline"]}Invalid "mark_set" value: {value}')
    if not slash:
        return value

    # Invert the mask ("/0xff00" -> "0xffff00ff") so that same mask is
    # used in set and match operations in config. Nftables requires the
    # inverted form 0xffff00ff.
    try:
        inverted = 0xffffffff ^ int(mask_text, 0)  # dec or hex
    except ValueError:
        fail(f'{rule["fileline"]}Invalid "mark_set" value: {value}')
    return f'meta mark & {hex(inverted)} | {mark}'


def mark_match(rule):
    """Convert "x" to "== x", "x/y" to "and y == x".

    Negative "-x" can also be used to get "!= x".
    """
    value = rule['mark_match']
    if not value:
        return ''
    if value.startswith('-'):
        check = '!='
        value = value[1:]
    else:
        check = '=='
    mark, slash, mask = value.partition('/')
    if '/' in mask:  # More than one "/" is invalid
        fail(f'{rule["fileline"]}Invalid "mark_match" value: '
             f'{rule["mark_match"]}')
    if not slash:
        return f'meta mark {check} {value} '
    return f'meta mark & {mask} {check} {mark} '


def rule_rate_log_policy(szone, dzone, rule, ipv, force_policy=None,
                         do_lograte=True):
    """Return rule's user, rate, log and policy as nft command.

    szone/dzone are used for counter selection and the default log
    prefix. force_policy overrides rule['policy'] (used for raw nft
    rules). With do_lograte, logging is emitted as "jump lograte_N"
    and registered in LOGRATES, which optimize_jumps() may later
    inline; otherwise a plain "log" statement is produced.
    """
    # pylint: disable=too-many-arguments
    ret = ''
    policy = force_policy or rule['policy']

    # Match socket owner/group
    if rule['uid']:
        ret += f'meta skuid {single_or_set(rule["uid"], rule["fileline"])} '
    if rule['gid']:
        ret += f'meta skgid {single_or_set(rule["gid"], rule["fileline"])} '

    # Per-rule rate limits ("update @set { ... }" / "limit rate ...")
    ret += rule_rate_limit(rule, ipv)

    # "counter" in single rule line adds counters to it.
    #
    # "foomuuri { counter xxx }" can be used to add counters to all rules:
    #   yes        - add to all rules in all zone-zone
    #   zone-zone  - add to all rules in single zone-zone
    #   zone-any   - add to all rules in all zone-*
    #   any-zone   - add to all rules in all *-zone
    # Multiple zone-pairs can be defined
    counterlist = CONFIG['counter'].split()
    if not rule['nft'] and (  # pylint: disable=too-many-boolean-expressions
            rule['counter'] is not None or  # "counter" in this rule
            'yes' in counterlist or  # global "yes"
            f'{szone}-{dzone}' in counterlist or  # matching zone-zone
            f'{szone}-any' in counterlist or
            f'any-{dzone}' in counterlist
    ):
        ret += 'counter '
        if rule['counter']:  # Named counter ("counter foo")
            ret += f'name {rule["counter"]} '

    # "log" in rule will log all packets matching this rule. This is usually
    # used in final "drop log" rule. Default log text is "zone-zone POLICY".
    if rule['log'] is not None or rule['log_level'] is not None:
        log_text = rule['log'] or (f'{szone}-{dzone} '
                                   f'{policy.split()[0].upper()}')
        log_level = (CONFIG['log_level'] if rule['log_level'] is None else
                     rule['log_level'])
        if do_lograte:
            # Limit logging to "foomuuri { log_rate }" entries. This is
            # important to avoid overlogging (DoS or filesystem full).
            logname = f'lograte_{len(LOGRATES) + 1}'
            LOGRATES[logname] = (ipv, log_text, policy, log_level)
            return f'{ret}jump {logname}'
        ret += f'log prefix "{log_text} " {log_level} '

    # mark_set argument parser
    if policy == 'mark_set':
        policy = f'meta mark set {mark_set_argument(rule)}'
    elif policy == 'mark_save':  # packet -> ct
        policy = 'ct mark set meta mark'
    elif policy == 'mark_restore':  # ct -> packet
        policy = 'meta mark set ct mark'

    return ret + policy


def output_icmp(szone, dzone, rules, ipv):
    """Find and parse icmp and icmpv6 rules.

    These must be handled before ct as "ct established" would accept
    ping floods. Default is to drop pings.
    """
    has_ping_rule = False
    has_match_all = False
    # Ping is icmp type 8 (echo-request) / icmpv6 type 128
    if ipv == 4:
        icmp = 'icmp'
        ping = '8'
    else:
        icmp = 'icmpv6'
        ping = '128'
    for rule in rules:
        if rule['protocol'] != icmp:
            continue

        match = match_rules(rule, rule['cast'], ipv, skip_icmp=False)
        if match is None:  # Rule not applicable to this family/cast
            continue
        policy = rule_rate_log_policy(szone, dzone, rule, ipv)

        proto_ports = parse_protocol_ports(rule, ipv, skip_icmp=False)
        out(f'{proto_ports}{match}{policy}')
        if rule['dport'] and ping not in rule['dport'].split():
            continue  # Continue to next rule if this wasn't ping or match all

        # This rule was for ping, usually accepting non-flood pings. Add
        # explicit rule to drop overflow and all other pings.
        has_ping_rule = True
        # Empty match + plain verdict means the rule already matched all
        # pings, so no explicit drop is needed
        if (match + policy).startswith(('accept', 'drop', 'reject', 'jump')):
            has_match_all = True
        elif has_match_all:  # Specific ping rule after match-all rule
            print(f'{rule["fileline"]}Warning: Unreachable ping rule')

    # Overflow-pings must be dropped before ct
    if has_ping_rule and not has_match_all:
        out(f'{icmp} type {ping} drop')

    # Allow needed icmp
    out(f'jump allow_icmp_{ipv}')


def parse_iplist(rule, direction, ipv):
    """Parse IP address list in rule[direction] to nft rule.

    Raises ValueError when addresses exist but none matches this
    address family, so the caller can skip the whole rule.
    """
    iplist = rule[direction]
    if not iplist:
        return ''

    ips = []
    for token in iplist.split():
        if token.startswith('@'):
            # Named set "@foo" becomes "@foo_4"/"@foo_6", used by resolve
            ips.append(f'{token}_{ipv}')
            continue
        family = is_ip_address(token)
        if family == ipv:  # Address for this ipv - add to list
            ips.append(token)
        elif not family:  # Invalid IP address
            fail(f'{rule["fileline"]}Invalid IP address "{token}" in: '
                 f'{iplist}')

    # No matching addresses for this ipv family
    if not ips:
        raise ValueError

    # Return "ip saddr 10.2.3.4 " string
    family_kw = 'ip' if ipv == 4 else 'ip6'
    return f'{family_kw} {direction} {single_or_set(ips, rule["fileline"])} '


def parse_interface_names(rule):
    """Parse iifname/oifname to nft rule."""
    parts = []
    for key in ('iifname', 'oifname'):
        if rule[key]:
            parts.append(f'{key} {rule[key]} ')
    return ''.join(parts)


def parse_protocol_ports(rule, ipv, skip_icmp=True):
    """Parse tcp/udp sport/dport to nft rule.

    This can also handle rules like:
    - "tcp" without dport to nft "protocol tcp"
    - "protocol esp" to nft "protocol esp"
    - "protocol esp 123" to nft "esp spi 123"
    - "protocol vlan 123" to nft "vlan id 123"
    """
    protocol = rule['protocol']
    if not protocol or (skip_icmp and protocol in ('icmp', 'icmpv6')):
        # Protocol is empty for rules like "drop log" and "dnat"
        # icmp is handled in output_icmp()
        return ''
    if protocol == 'igmp' and ipv == 6:
        return None  # IPv6 uses Multicast Listener Discovery ICMP

    # Protocols whose "dport" value maps to another nft keyword
    dport_keyword = {
        'ip': 'protocol',
        'ip6': 'nexthdr',
        'ah': 'spi',
        'esp': 'spi',
        'comp': 'nexthdr',
        'icmp': 'type',
        'icmpv6': 'type',
        'dst': 'nexthdr',
        'frag': 'nexthdr',
        'hbh': 'nexthdr',
        'mh': 'nexthdr',
        'rt': 'nexthdr',
        'vlan': 'id',
        'arp': 'htype',
    }

    ports = ''
    for key in ('sport', 'dport'):
        if not rule[key]:
            continue
        keyword = dport_keyword.get(protocol, key) if key == 'dport' else key
        ports += (f'{protocol} {keyword} '
                  f'{single_or_set(rule[key], rule["fileline"])} ')
    if ports:
        return ports
    # Plain protocol without any ports
    return f'{"ip protocol" if ipv == 4 else "ip6 nexthdr"} {protocol} '


def parse_to(rule, ipv):
    """Parse snat/dnat "to" rule to nft rule."""
    if not rule['to']:
        return ''

    # "to" can list both IPv4 and IPv6 targets - keep only this family
    matching = []
    for candidate in rule['to'].split():
        if (candidate.count(':') == 1 and
                is_ip_address(candidate.split(':')[0]) == 4):
            family = 4  # IPv4 address with port
        elif (candidate.startswith('[') and
                ']:' in candidate and
                is_ip_address(candidate[1:].split(']:')[0]) == 6):
            family = 6  # IPv6 address with port, "[addr]:port"
        else:
            family = is_ip_address(candidate)
        if family == 0:
            fail(f'{rule["fileline"]}Invalid IP address in "to": {candidate}')
        if family == ipv:
            matching.append(candidate)
    if not matching:  # Nothing found for this ipv, don't generate rule
        return None
    if len(matching) > 1:
        fail(f'{rule["fileline"]}Multiple "to" targets: '
             f'{" ".join(matching)}')
    return f' to {matching[0]}'


def match_rules(rule, cast, ipv, *, skip_options=True, skip_icmp=True,
                need_addrlist=False):
    """Parse rule's matchers to nft rule.

    Returns the matcher string, or None when the rule does not apply:
    wrong cast, icmp handled elsewhere, mss-option rule, wrong address
    family, or no addresses for this family.
    """
    # pylint: disable=too-many-arguments
    if (
            rule['cast'] != cast or
            (skip_icmp and rule['protocol'] in ('icmp', 'icmpv6')) or
            (skip_options and rule['mss'])
    ):
        return None

    # IPv4/6 specific rule?
    if ipv == 4 and rule['ipv6'] and not rule['ipv4']:
        return None
    if ipv == 6 and rule['ipv4'] and not rule['ipv6']:
        return None

    # Convert matchers to nft
    castmeta = ''
    if rule['cast'] != 'unicast':
        castmeta = f'meta pkttype {rule["cast"]} '

    # IPsec policy matchers for incoming/outgoing direction
    ipsecmeta = ''
    if rule['sipsec']:
        ipsecmeta += f'meta ipsec {rule["sipsec"]} '
    if rule['dipsec']:
        ipsecmeta += f'rt ipsec {rule["dipsec"]} '

    ifname = parse_interface_names(rule)

    # parse_iplist() raises ValueError when the rule has addresses but
    # none for this family - then no rule is generated at all
    addrlist = ''
    try:
        addrlist += parse_iplist(rule, 'saddr', ipv)
        addrlist += parse_iplist(rule, 'daddr', ipv)
    except ValueError:
        return None
    if not addrlist and need_addrlist:  # Needed in snat/dnat
        addrlist = ('ip daddr 0.0.0.0/0 ' if ipv == 4 else
                    'ip6 daddr ::/0 ')

    proto_ports = parse_protocol_ports(rule, ipv)
    if proto_ports is None:  # e.g. igmp on IPv6
        return None

    mark = mark_match(rule)

    # Return matcher string
    return f'{ipsecmeta}{castmeta}{ifname}{addrlist}{proto_ports}{mark}'


def output_cast(cast, szone, dzone, rules, ipv):
    """Output all uni/multi/broadcast rules for single zone-zone."""
    has_mark_match = False
    for rule in rules:
        match = match_rules(rule, cast, ipv)
        if match is None:
            continue
        policy = rule_rate_log_policy(szone, dzone, rule, ipv, rule['nft'])
        if re.match(r'^counter( name \w+)? accept$', match + policy):
            continue  # Plain "counter" is handled in output_counter()

        # mark_match needs the mark restored from conntrack first; emit
        # "meta mark set ct mark" once, before the first rule using it
        if match + policy == 'meta mark set ct mark':
            has_mark_match = True
        if not has_mark_match and rule['mark_match']:
            has_mark_match = True
            out('meta mark set ct mark')

        out(f'{match}{policy}')

        # Does this rule need kernel helper?
        if rule['helper']:
            # Helper name format is "kernelname-instance", "_" in the
            # kernel part stands for "-" (e.g. "ftp-standard")
            if rule['helper'].count('-') != 1:
                fail(f'{rule["fileline"]}Invalid helper name: '
                     f'{rule["helper"]}')
            HELPERS.append((rule['helper'], rule['protocol'], rule['dport']))
            kernelname = rule['helper'].split('-')[0].replace('_', '-')
            out(f'ct helper \"{kernelname}\" {rule["policy"]}')


def output_zonemap(zonemap, szone, dzone, ipv):
    """Output zonemap{} rules for this szone-dzone."""
    for rule in zonemap:
        # Skip rules restricted to other source/destination zones
        if rule['szone'] and szone not in rule['szone'].split():
            continue
        if rule['dzone'] and dzone not in rule['dzone'].split():
            continue
        # Default to the current zones; skip no-op remaps
        target = (rule['new_szone'] or szone, rule['new_dzone'] or dzone)
        if target == (szone, dzone):
            continue

        match = match_rules(rule, rule['cast'], ipv)
        if match is not None:
            out(f'{match}jump {target[0]}-{target[1]}_{ipv}')


def output_options(rules, ipv):
    """Output "mss" etc options."""
    if ipv != 4:  # mss clamping is generated for IPv4 chains only
        return
    for rule in rules:
        if not rule['mss']:
            continue
        # NOTE(review): match may be None here if the rule is filtered
        # out by match_rules(); original behaves the same - confirm
        match = match_rules(rule, rule['cast'], ipv, skip_options=False)
        out(f'{match}tcp flags syn tcp option '
            f'maxseg size set {rule["mss"]}')


def output_counter(rules, ipv):
    """Output plain "counter" without "accept"."""
    for rule in rules:
        if rule['counter'] is None:
            continue
        # Only matcher-less rules; rules with matchers are emitted in
        # output_cast()
        if match_rules(rule, rule['cast'], ipv) != '':
            continue
        policy = rule_rate_log_policy('-', '-', rule, ipv,
                                      do_lograte=False)
        if re.match(r'^counter( name \w+)? accept$', policy):
            out(policy[:-7])  # Strip trailing " accept"


def output_zone(zonemap, szone, dzone, rules, ipv):
    """Output single zone-zone_ipv4 nft chain.

    Rule order inside the chain is significant: zonemap jumps, mss
    options, plain counters and icmp must all come before conntrack.
    """
    # Header + zonemap jumps + options
    out(f'chain {szone}-{dzone}_{ipv} {{')
    output_zonemap(zonemap, szone, dzone, ipv)
    output_options(rules, ipv)

    # Plain "counter" must count everything, before ct and icmp
    output_counter(rules, ipv)

    # ICMP is special, keep it before ct
    output_icmp(szone, dzone, rules, ipv)

    # Connection tracking
    out('ct state vmap {')
    out('established : accept,')
    out('related : accept,')
    out('invalid : goto invalid_drop,')
    out(f'new : jump smurfs_{ipv},')
    out(f'untracked : jump smurfs_{ipv}')
    out('}')

    # Broadcast and multicast. localhost-localhost keeps casts alive
    # for local multicast applications.
    output_cast('multicast', szone, dzone, rules, ipv)
    local_local = szone == dzone == CONFIG['localhost_zone']
    if ipv == 4:
        output_cast('broadcast', szone, dzone, rules, ipv)
        if not local_local:
            out('meta pkttype { broadcast, multicast } drop')
    elif not local_local:  # IPv6 has no broadcast
        out('meta pkttype multicast drop')

    # Unicast
    output_cast('unicast', szone, dzone, rules, ipv)
    out('}')


def output_zone_vmaps(zones, rules):
    """Output interface verdict maps to jump to correct zone-zone.

    Beware:

    Wildcard matching coredumps in RHEL / nft v1.0.4. Do not use!
    It works fine on Fedora / nft v1.0.5.
    """
    # pylint: disable=too-many-branches

    # Vmap must have interval-flag if there is wildcard-interface.
    localhost = CONFIG["localhost_zone"]
    has_wildcard = False
    for value in zones.values():
        for interface in value['interface']:
            if '*' in interface:
                has_wildcard = True

    # Incoming zones: iifname -> zone-localhost chain
    out('map input_zones {')
    out('type ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    out('lo : accept,')
    for zone, value in zones.items():
        for interface in value['interface']:
            out(f'{interface} : jump {zone}-{localhost},')
    out('}')
    out('}')

    # Outgoing zones: oifname -> localhost-zone chain
    out('map output_zones {')
    out('type ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    if rules[(localhost, localhost)]:  # Use lo-lo if it is not empty
        out(f'lo : jump {localhost}-{localhost},')
    else:
        out('lo : accept,')
    for zone, value in zones.items():
        for interface in value['interface']:
            out(f'{interface} : jump {localhost}-{zone},')
    out('}')
    out('}')

    # Forwarding zones: iifname . oifname -> zone-zone chain, for
    # every zone pair combination
    out('map forward_zones {')
    out('type ifname . ifname : verdict')
    if has_wildcard:
        out('flags interval')
    out('elements = {')
    out('lo . lo : accept,')
    for szone, svalue in zones.items():
        for dzone, dvalue in zones.items():
            for sinterface in svalue['interface']:
                for dinterface in dvalue['interface']:
                    out(f'{sinterface} . {dinterface} : '
                        f'jump {szone}-{dzone},')
    out('}')
    out('}')


def output_zone2zone_rules(rules, zonemap):
    """Output all zone-zone rules for both IPv4 and IPv6."""
    for (szone, dzone), rulelist in rules.items():
        # Dispatch chain splits traffic to zone-zone_4 / zone-zone_6
        out(f'chain {szone}-{dzone} {{')
        out('meta nfproto vmap {')
        out(f'ipv4 : jump {szone}-{dzone}_4,')
        out(f'ipv6 : jump {szone}-{dzone}_6')
        out('}')
        out('}')

        # Per-family chains
        for ipv in (4, 6):
            output_zone(zonemap, szone, dzone, rulelist, ipv)


def output_rule_section(rules, section):
    """Output snat, dnat, prerouting or postrouting rules.

    Creates one base chain per section. IPv4 and IPv6 share the chain;
    rules identical for both families are emitted only once.
    """
    if not rules:
        return

    # Base chain hook specification per section
    hooktype = {
        'snat': 'type nat hook postrouting priority srcnat',
        'dnat': 'type nat hook prerouting priority dstnat',
        'prerouting': 'type filter hook prerouting priority mangle',
        'postrouting': 'type filter hook postrouting priority mangle',
        'output': 'type route hook output priority mangle',
        'forward': 'type filter hook forward priority mangle',
    }.get(section)
    hooktype += CONFIG['priority_offset']

    # Chain name like "nat_postrouting_srcnat" from hook type words
    chain_name = hooktype.split()
    out(f'chain {chain_name[1]}_{chain_name[3]}_{chain_name[5]} {{')
    out(hooktype)

    has_mark_set = False
    has_mark_match = False
    ip_merger = set()  # Dedup rules identical for both families
    for rule in rules:
        for ipv in (4, 6):
            to_rule = parse_to(rule, ipv)
            if to_rule is None:  # No "to" target for this family
                continue
            match = match_rules(rule, rule['cast'], ipv, need_addrlist=to_rule)
            if match is None:
                continue
            policy = rule_rate_log_policy(section.upper(), '', rule, ipv,
                                          rule['nft'], do_lograte=False)

            # Mark match/set requires restoring/saving mark. Add it if not
            # already added.
            if match + policy == 'meta mark set ct mark':
                has_mark_match = True
            else:
                has_mark_set |= 'meta mark set' in policy
            if match + policy == 'ct mark set meta mark':
                has_mark_set = False  # Explicit save, skip automatic one

            if not has_mark_match and rule['mark_match']:
                has_mark_match = True
                out('meta mark set ct mark')

            # There are no separate IPv4/IPv6 chains so merge possible rules
            full_rule = f'{match}{policy}{to_rule}'
            if full_rule in ip_merger:
                continue
            ip_merger.add(full_rule)
            out(full_rule)

    # Save mark to connection tracking if there were mark_set rules. These
    # are used in prerouting{} for multi-ISP support.
    if has_mark_set:
        out('ct mark set meta mark')
    out('}')


def output_static_chain_logging(chain, policy):
    """Output logging rules for input/output/forward/invalid/smurfs chains."""
    lograte = CONFIG[f'log_{chain}']  # Enabled in foomuuri{}
    setting = lograte.lower()
    if setting == 'no':
        return
    if setting == 'yes':  # "yes" means use standard log rate
        lograte = CONFIG['log_rate']
    flags = ' flags ip options' if chain == 'invalid' else ''
    flags += f' {CONFIG["log_level"]}'
    prefix = f'{chain.upper()} {policy.upper()} '

    if not lograte:  # Unlimited logging
        out(f'log prefix "{prefix}"{flags}')
        return

    # Rate-limit logging per source address, separately per family
    for ipv, saddr in ((4, 'ip saddr'), (6, 'ip6 saddr')):
        out(f'update @_lograte_set_{ipv} {{ {saddr} limit rate {lograte} }} '
            f'log prefix "{prefix}"{flags}')


def output_header():
    """Output generic nft header.

    Stops the previous foomuuri table, opens a new one, inlines *.nft
    include files and creates the logging and input/output/forward
    base chains.
    """
    command_stop(False)  # Stop previous foomuuri
    out('')
    out('table inet foomuuri {')  # Add new foomuuri
    out('')

    # Insert include files, share_dir first then etc_dir, both sorted
    # pylint: disable=no-member  # rglob, read_text
    for filename in (sorted(list(CONFIG['share_dir'].rglob('*.nft'))) +
                     sorted(list(CONFIG['etc_dir'].rglob('*.nft')))):
        try:
            lines = filename.read_text('utf-8').splitlines()
        except PermissionError as error:
            fail(f'File (unknown): Can\'t read: {error}')
        for line in lines:
            line = line.strip()
            if line:
                out(line)

    # Logging chains
    for chain in ('invalid', 'smurfs', 'rpfilter'):
        out(f'chain {chain}_drop {{')
        if chain == 'rpfilter':
            # DHCP replies (67 -> 68) have no route yet, don't drop
            out('udp sport 67 udp dport 68 return')
        output_static_chain_logging(chain, 'drop')
        out('drop')
        out('}')

    # input/output/forward jump chains
    out('chain input {')
    out(f'type filter hook input priority filter{CONFIG["priority_offset"]}')
    out('iifname vmap @input_zones')
    output_static_chain_logging('input', 'drop')
    out('drop')
    out('}')

    out('chain output {')
    out(f'type filter hook output priority filter{CONFIG["priority_offset"]}')
    out('oifname vmap @output_zones')
    output_static_chain_logging('output', 'reject')
    out('reject with icmpx admin-prohibited')
    out('}')

    out('chain forward {')
    out(f'type filter hook forward priority filter{CONFIG["priority_offset"]}')
    out('iifname . oifname vmap @forward_zones')
    output_static_chain_logging('forward', 'drop')
    out('drop')
    out('}')


MERGES = [  # Preferred order for rules
    # Rule prefixes that optimize_accepts()/merge_accepts() can combine
    # into a single set-based accept; merged rules are emitted in this
    # order.
    'meta pkttype multicast udp dport',
    'meta pkttype broadcast udp dport',
    'udp dport',
    'udp sport',
    'tcp dport',
    'tcp sport',
    'ct helper',
]


def merge_accepts(accepts, linenum):
    """Sort and merge found accept rules.

    accepts: collected "... accept" lines. Mergeable lines are combined
    per MERGES key into one set-based rule; the rest are re-inserted
    unchanged at OUT[linenum]. Returns the number of lines inserted.
    """
    # Keys reversed so that inserting each merged rule at the same
    # fixed index ends up in MERGES-preferred order
    merge = {key: [] for key in MERGES[::-1]}
    ret = 0
    for accept in accepts:
        for key, ports in merge.items():
            # Match "tcp dport 22 accept" and "tcp dport { 22, 80 } accept"
            regex = f'^{key} (\\{{ )?([-\\d, ]+)( \\}})? accept$'
            match = re.match(regex, accept)
            if match:  # Add "22" from "tcp dport 22 accept" to merged
                ports.append(match.group(2))
                break
        else:  # Can't merge, output as is
            OUT.insert(linenum + ret, accept)
            ret += 1

    # Output merged
    for key, ports in merge.items():
        if ports:
            OUT.insert(linenum, f'{key} {single_or_set(ports)} accept')
            ret += 1
    return ret


def optimize_accepts():
    """Optimize ruleset accepts.

    This will change multiple accepts to single accept using set.
    Consecutive mergeable "... accept" lines are collected, removed
    from OUT and replaced by merge_accepts() with set-based rules.
    """
    accepts = []
    linenum = 0
    while linenum < len(OUT):
        line = OUT[linenum]
        if (
                line.endswith(' accept') and
                line.startswith(tuple(MERGES) + ('ip ', 'ip6 '))
        ):
            # Candidate: remove it, merged rule will replace it
            accepts.append(line)
            del OUT[linenum]
        else:
            # Non-mergeable line ends the run: flush collected accepts
            # before it and skip over the inserted lines
            linenum += merge_accepts(accepts, linenum) + 1
            accepts = []

    # Flush a run of accepts at the very end of the ruleset; without
    # this the collected (and already deleted) lines would be lost.
    if accepts:
        merge_accepts(accepts, len(OUT))


def optimize_jumps():
    """Optimize lograte jumps in ruleset.

    This will change zone-zone's final "drop log" rule to optimized version.
    Each "jump lograte_N" placeholder is replaced in place with the
    rate-limited log line plus its policy; the entry is popped from
    LOGRATES so output_logrates() won't create the chain anymore.
    """
    linenum = 0
    while linenum < len(OUT):
        line = OUT[linenum]
        if line.startswith('jump lograte_'):
            logname = line.split()[1]
            ipv, log_text, policy, log_level = LOGRATES.pop(logname)
            rate = ''
            if CONFIG['log_rate']:
                rate = (f'update @_lograte_set_{ipv} '
                        f'{{ {"ip" if ipv == 4 else "ip6"} saddr '
                        f'limit rate {CONFIG["log_rate"]} }} ')
            OUT[linenum] = f'{rate}log prefix "{log_text} " {log_level}'
            OUT.insert(linenum + 1, policy)
        linenum += 1


def output_logrates():
    """Output non-optimized lograte entries as chains."""
    log_rate = CONFIG['log_rate']
    for logname, (ipv, log_text, policy, log_level) in LOGRATES.items():
        out(f'chain {logname} {{')
        rate = ''
        if log_rate:
            saddr = 'ip saddr' if ipv == 4 else 'ip6 saddr'
            rate = (f'update @_lograte_set_{ipv} '
                    f'{{ {saddr} limit rate {log_rate} }} ')
        out(f'{rate}log prefix "{log_text} " {log_level}')
        out(f'{policy}')
        out('}')

    # Output empty lograte sets (optimized and non-optimized)
    for ipv in (4, 6):
        out(f'set _lograte_set_{ipv} {{')
        out(f'type ipv{ipv}_addr')
        out(f'size {CONFIG["set_size"]}')
        out('flags dynamic,timeout')
        out('timeout 1m')
        out('}')


def output_resolve_sets(resolve, flags):
    """Output empty resolve{} and iplist{} sets."""
    for name in resolve:
        if name.startswith('@'):
            # "@foo_4" -> "set foo_4"; last char selects address family
            out(f'set {name[1:]} {{')
            out(f'type ipv{name[-1]}_addr')
            out(f'flags {flags}timeout')
            out('}')


def output_resolve_elements(resolve, statefile):
    """Add resolve{} and iplist{} elements from state file to ruleset.

    Elements from the previous run are re-added so resolved addresses
    survive a reload. Expected line format in the state file:
    "add element inet foomuuri name_4 { addr timeout 60s }"
    optionally followed by "# <ISO timestamp>" expire marker.
    """
    # Read previous resolve results
    filename = CONFIG[statefile]
    try:
        # pylint: disable=no-member
        content = filename.read_text(encoding='utf-8')
    except FileNotFoundError:
        return
    except PermissionError as error:
        fail(f'File (unknown): Can\'t read: {error}')

    # Known resolve names in current config files
    known = [name[1:] for name in resolve if name.startswith('@')]
    if not known:
        return

    # Add previous result if resolve name is known
    out('')
    now = datetime.datetime.now(datetime.timezone.utc)
    for line in content.splitlines():
        if line.startswith('# '):  # Try "didn't exist in active ruleset" now
            line = line[2:]
        tokens = line.split()
        if (
                line.startswith('add element inet foomuuri ') and
                len(tokens) >= 10 and
                tokens[7] == 'timeout' and
                tokens[4] in known  # It is known name
        ):
            # Check optional expire timestamp, used in iplist-manual.fw
            if len(tokens) == 12 and tokens[10] == '#':
                try:
                    expire = datetime.datetime.strptime(tokens[11],
                                                        '%Y-%m-%dT%H:%M:%S')
                    expire = expire.replace(tzinfo=datetime.timezone.utc)
                except ValueError:
                    continue  # Malformed timestamp, drop entry
                # Skip expired entries, update timeout for rest
                seconds = int((expire - now).total_seconds())
                if seconds <= 0:
                    continue
                line = f'{" ".join(tokens[:8])} {seconds}s }}'

            out(line)


def output_named_counters(rules):
    """Output named counters."""
    # Collect unique counter names from all rules
    names = {rule['counter']
             for rulelist in rules.values()
             for rule in rulelist
             if rule['counter']}

    # Output one empty named counter declaration each
    for name in names:
        out(f'counter {name} {{')
        out('}')


def output_helpers():
    """Output helpers.

    HELPERS is filled by output_cast() with (name, protocol, ports)
    tuples; name format is "kernelname-instance" where "_" in the
    kernel part stands for "-" (e.g. "ftp-standard").
    """
    # Convert helper list to helper->proto->set(ports) dict
    helpers = {}
    for name, proto, ports in HELPERS:
        if name not in helpers:
            helpers[name] = {}
        if proto not in helpers[name]:
            helpers[name][proto] = set()
        for port in ports.split():
            helpers[name][proto].add(port)
    if not helpers:
        return

    # Output "ct helper" lines
    for name, protos in helpers.items():
        # Kernel helper name is the part before "-", "_" meaning "-"
        kernelname = name.split('-')[0].replace('_', '-')
        out(f'ct helper {name} {{')
        for proto in protos:
            out(f'type \"{kernelname}\" protocol {proto}')
        out('}')

    # Output prerouting chain assigning helpers to matching traffic
    out('chain helper {')
    out(f'type filter hook prerouting priority filter'
        f'{CONFIG["priority_offset"]}')
    for name, protos in helpers.items():
        for proto, ports in protos.items():
            out(f'{proto} dport {single_or_set(" ".join(ports))} '
                f'ct helper set \"{name}\"')
    out('}')


def output_rpfilter():
    """Prerouting chain to check rpfilter."""
    rpfilter = CONFIG['rpfilter']
    if rpfilter == 'no':
        return
    out('chain rpfilter {')
    out(f'type filter hook prerouting priority filter'
        f'{CONFIG["priority_offset"]}')
    # "yes" checks all interfaces, anything else lists interfaces
    interfaces = ('' if rpfilter == 'yes' else
                  f'iifname {single_or_set(rpfilter)} ')
    out(f'{interfaces}fib saddr . mark . iif oif eq 0 meta ipsec missing '
        f'jump rpfilter_drop')
    out('}')


def output_footer():
    """Output generic ruleset footer."""
    # Closes "table inet foomuuri {" opened in output_header()
    out('}')


def save_file(filename, lines):
    """Write lines to file.

    `filename` is a pathlib.Path. Any existing file is replaced and
    the result is made readable by owner only (0o600) since rulesets
    may contain sensitive addresses.
    """
    try:
        filename.unlink(missing_ok=True)
        filename.write_text('\n'.join(lines) + '\n', 'utf-8')
        filename.chmod(0o600)
    except PermissionError as error:
        # Name the failing file so the user knows which write failed
        fail(f'File {filename}: Can\'t write: {error}')
    except FileNotFoundError:  # Simultaneous saves and chmod gives error
        pass


def env_cleanup(text):
    """Sanitize text for use in an environment variable.

    Accented letters are folded to plain ASCII (ä -> a) and every
    remaining non-alphanumeric character becomes an underscore.
    """
    # NFKD decomposition splits accents off, ASCII-encode drops them;
    # this is needed because isalpha('ä') would otherwise be true
    folded = unicodedata.normalize('NFKD', text)
    folded = folded.encode('ASCII', 'ignore').decode('utf-8')

    # After ASCII folding, isalnum() is exactly [0-9A-Za-z]
    return re.sub(r'[^0-9A-Za-z]', '_', folded)


def save_final(filename):
    """Pretty-print the generated ruleset in OUT[] and save it.

    Lines are indented with tabs according to brace depth, and a blank
    line is inserted after each top-level closing brace for readability.
    """
    depth = 0
    pretty = []
    for raw in OUT:
        if raw.startswith('}'):
            depth -= 1
        indented = '\t' * depth + raw if raw else raw
        pretty.append(indented)
        if indented == '\t}':  # end of a chain/set -> blank separator
            pretty.append('')
        if raw.endswith('{'):
            depth += 1

    save_file(filename, pretty)


def signal_childs():
    """Signal foomuuri-dbus and foomuuri-monitor daemons to reload.

    Reads each child's pid file from run_dir and sends SIGHUP. Missing
    or malformed pid files (daemon not running) are silently skipped.
    """
    for child in ('dbus', 'monitor'):
        # Read pid
        filename = CONFIG['run_dir'] / f'foomuuri-{child}.pid'
        try:
            pid = int(filename.read_text(encoding='utf-8'))
        except PermissionError as error:
            # Fix: report the actual pid-file name instead of "(unknown)"
            fail(f'File {filename}: Can\'t read: {error}')
        except (FileNotFoundError, ValueError):
            continue

        # Send reload-signal; stale pid of a dead process is harmless
        try:
            os.kill(pid, signal.SIGHUP)
        except OSError:
            pass


def apply_final():
    """Use final ruleset.

    Behavior depends on CONFIG['command']:
    * 'check'          - only verify ruleset syntax with "nft --check"
    * 'start*'/'stop'  - run pre-hook, load "next" ruleset, run post-hook
    * 'start-or-good'  - additionally fall back to the last known "good"
                         ruleset if loading the new one fails

    Returns 0 on success, non-zero on failure.
    """
    # Check config
    if CONFIG['command'] == 'check':
        ret = run_program_rc([CONFIG['nft_bin'], '--check', '--file',
                              CONFIG['next_file']])
        if ret:
            print(f'Error: Nftables failed to check ruleset, error code {ret}')
        else:
            print('check success')
        return ret

    # Run pre_start / pre_stop hook ([:5] maps "start-or-good" -> "start")
    run_program_rc(CONFIG.get(f'pre_{CONFIG["command"][:5]}'))

    # Load "next"
    ret = run_program_rc([CONFIG['nft_bin'], '--file', CONFIG['next_file']])

    # Rename "next" to "good" if success and signal dbus to reload
    if ret == 0:
        if CONFIG['command'].startswith('start'):
            # pylint: disable=no-member
            CONFIG['good_file'].unlink(missing_ok=True)
            CONFIG['next_file'].rename(CONFIG['good_file'])
        signal_childs()
        run_program_rc(CONFIG.get(f'post_{CONFIG["command"][:5]}'))
        print(f'{CONFIG["command"]} success')
        return 0

    # Failure. Exit or fallback to good if "start-or-good".
    print(f'Error: Failed to load ruleset to nftables, error code {ret}')
    if CONFIG['command'] == 'start-or-good':
        if CONFIG['good_file'].exists():
            ret = run_program_rc([CONFIG['nft_bin'], '--file',
                                  CONFIG['good_file']])
            if ret:
                print(f'Error: Failed to load \"good\" ruleset to nftables, '
                      f'error code {ret}')
            else:
                signal_childs()  # Good success, signal dbus + monitor
                run_program_rc(CONFIG.get('post_start'))
                print('Warning: Fallbacked to previous \"good\" ruleset')
        else:
            print('Error: Failed to load \"good\" ruleset, no such file')
    return 1


def command_start():
    """Process "start" or "check" command.

    Parses the full configuration, generates the complete nftables
    ruleset into OUT[], saves auxiliary resolve/iplist state files and
    the list of known zone names. Actual loading of the ruleset is done
    later by apply_final(). Note: the parse order below matters, the
    parse functions consume sections from config{} and the final
    parse_config_rules() call also flags unknown leftover sections.
    """
    # Read full config
    config = minimal_config()
    zones = parse_config_zones(config)
    zonemap = parse_config_zonemap(config)
    resolve = parse_resolve(config, 'resolve')
    iplist = parse_resolve(config, 'iplist')
    snat = parse_config_rule_section(config, 'snat')
    dnat = parse_config_rule_section(config, 'dnat')
    prerouting = parse_config_rule_section(config, 'prerouting')
    postrouting = parse_config_rule_section(config, 'postrouting')
    output = parse_config_rule_section(config, 'output')
    forward = parse_config_rule_section(config, 'forward')
    templates = parse_config_templates(config)
    parse_config_groups(config, parse_config_targets(config))
    parse_config_hook(config)
    rules = parse_config_rules(config)  # Also verify for unknown sections
    insert_any_zones(zones, rules)
    expand_templates(rules, templates)
    verify_config(config, zones, rules)

    # Generate output
    output_header()
    output_rate_names(rules)
    output_zone_vmaps(zones, rules)
    output_zone2zone_rules(rules, zonemap)
    output_rule_section(snat, 'snat')
    output_rule_section(dnat, 'dnat')
    output_rule_section(prerouting, 'prerouting')
    output_rule_section(postrouting, 'postrouting')
    output_rule_section(output, 'output')
    output_rule_section(forward, 'forward')
    optimize_jumps()
    optimize_accepts()
    output_logrates()
    output_resolve_sets(resolve, '')
    output_resolve_sets(iplist, 'interval,')
    output_named_counters(rules)
    output_helpers()
    output_rpfilter()
    output_footer()
    output_resolve_elements(resolve, 'resolve_file')
    output_resolve_elements(iplist, 'iplist_file')
    output_resolve_elements(iplist, 'iplist_manual_file')

    # Save known zones to file
    save_file(CONFIG['zone_file'], zones.keys())


def command_stop(parse_config=True):
    """Process "stop" command. This will remove all foomuuri rules."""
    if parse_config:
        # Config is only needed to register pre_stop/post_stop hooks
        parse_config_hook(minimal_config())
    # Declare the table first so the delete succeeds even when it
    # doesn't exist yet
    out('table inet foomuuri')
    out('delete table inet foomuuri')


class DbusCommon:
    """D-Bus server - Common Functions.

    Shared implementation for the Foomuuri and FirewallD-emulation
    D-Bus objects. Interface-to-zone changes are applied to the live
    nftables ruleset by generating "add/delete element" commands into
    OUT[] and loading them via nft.
    """

    # zone-name -> zone-config mapping, injected via set_data()
    zones = None

    def set_data(self, zones):
        """Save config data: zone list is static."""
        self.zones = zones

    @staticmethod
    def parse_interface_zone():
        """Parse current interface->zone mapping from active nft ruleset.

        Reads the input_zones verdict map; the jump target "zone-localhost"
        is split on "-" to recover the zone name. "lo" is skipped.
        """
        data = run_program_json([CONFIG['nft_bin'], '--json', 'list', 'map',
                                 'inet', 'foomuuri', 'input_zones'])
        if not data:
            return {}
        ret = {}
        for item in data['nftables']:
            if 'map' in item:
                for interface, rule in item['map']['elem']:
                    if interface != 'lo':
                        ret[interface] = rule['jump']['target'].split('-')[0]
        return ret

    @staticmethod
    def clean_out():
        """Remove all entries from current OUT[] variable."""
        # OUT is a module-level list shared with out(); empty it in place
        while OUT:
            del OUT[0]

    @staticmethod
    def apply_out():
        """Apply current OUT commands."""
        # Save to dbus_file for debugging, then load into nftables
        save_final(CONFIG['dbus_file'])
        run_program_rc([CONFIG['nft_bin'], '--file', CONFIG['dbus_file']])

    @staticmethod
    def remove_interface(interface_zone, interface):
        """Remove interface from all zones.

        Emits "delete element" commands into OUT[] for the input, output
        and forward verdict maps. Returns the zone the interface was in,
        or '' if it was not mapped.
        """
        zone = interface_zone.get(interface)
        if not zone:
            return ''

        # input, output
        out(f'delete element inet foomuuri input_zones '
            f'{{ {interface} : jump {zone}-{CONFIG["localhost_zone"]} }}')
        out(f'delete element inet foomuuri output_zones '
            f'{{ {interface} : jump {CONFIG["localhost_zone"]}-{zone} }}')

        # forward: both directions for every known interface, including
        # the interface-to-itself pair (other == interface)
        for other, otherzone in interface_zone.items():
            out(f'delete element inet foomuuri forward_zones '
                f'{{ {other} . {interface} : jump {otherzone}-{zone} }}')
            if other != interface:
                out(f'delete element inet foomuuri forward_zones '
                    f'{{ {interface} . {other} : jump {zone}-{otherzone} }}')
        return zone

    @staticmethod
    def add_interface(interface_zone, interface, zone):
        """Add interface to zone.

        Emits "add element" commands into OUT[] for the input, output
        and forward verdict maps, mirroring remove_interface().
        """
        # input, output
        out(f'add element inet foomuuri input_zones '
            f'{{ {interface} : jump {zone}-{CONFIG["localhost_zone"]} }}')
        out(f'add element inet foomuuri output_zones '
            f'{{ {interface} : jump {CONFIG["localhost_zone"]}-{zone} }}')

        # forward: pairs with every other interface, plus self-pair
        for other, otherzone in interface_zone.items():
            if other != interface:
                out(f'add element inet foomuuri forward_zones '
                    f'{{ {other} . {interface} : jump {otherzone}-{zone} }}')
                out(f'add element inet foomuuri forward_zones '
                    f'{{ {interface} . {other} : jump {zone}-{otherzone} }}')
        out(f'add element inet foomuuri forward_zones '
            f'{{ {interface} . {interface} : jump {zone}-{zone} }}')

    def change_interface_zone(self, interface, new_zone):
        """Change interface to zone, or delete if zone is empty.

        Returns (old_zone, new_zone); both empty on invalid zone.
        """
        # D-Bus hands us dbus.String objects; force plain str
        interface, new_zone = str(interface), str(new_zone)
        if new_zone and new_zone not in self.zones:
            print(f'Warning: Zone "{new_zone}" is unknown', flush=True)
            return '', ''
        if new_zone == CONFIG['localhost_zone']:
            print(f'Warning: Can\'t add to zone '
                  f'"{CONFIG["localhost_zone"]}"', flush=True)
            return '', ''
        interface_zone = self.parse_interface_zone()
        self.clean_out()
        old_zone = self.remove_interface(interface_zone, interface)
        if new_zone:
            self.add_interface(interface_zone, interface, new_zone)
        self.apply_out()
        return old_zone, new_zone

    def parse_default_zone(self, interface, zone):
        """Return zone, or dbus_zone if empty."""
        interface, zone = str(interface), str(zone)
        if zone:
            return zone

        # Fallback to zones section, or to foomuuri.dbus_zone
        for key, value in self.zones.items():
            if interface in value['interface']:
                return key
        return CONFIG['dbus_zone']

    def method_get_zones(self):
        """Get available zones. "localhost" can't have interfaces."""
        return [name for name in self.zones
                if name != CONFIG['localhost_zone']]

    def method_remove_interface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty."""
        print(f'Interface "{interface}" remove from zone "{zone}"', flush=True)
        return self.change_interface_zone(interface, '')[0]

    def method_add_interface(self, zone, interface):
        """Add interface to zone. There can be only one zone per interface."""
        zone = self.parse_default_zone(interface, zone)
        print(f'Interface "{interface}" add to zone "{zone}"', flush=True)
        return self.change_interface_zone(interface, zone)[1]

    def method_change_zone_of_interface(self, zone, interface):
        """Change interface's zone."""
        zone = self.parse_default_zone(interface, zone)
        print(f'Interface "{interface}" change to zone "{zone}"', flush=True)
        return self.change_interface_zone(interface, zone)[0]


class DbusFoomuuri(dbus.service.Object, DbusCommon):
    """D-Bus server for Foomuuri.

    Thin wrapper exposing DbusCommon's zone management on the
    "fi.foobar.Foomuuri1.zone" interface. Signatures: "s" = string,
    "as" = array of strings.
    """

    # pylint: disable=invalid-name  # dbus method names

    @dbus.service.method('fi.foobar.Foomuuri1.zone',
                         in_signature='', out_signature='as')
    def getZones(self):
        """Get available zones. "localhost" can't have interfaces."""
        return self.method_get_zones()

    @dbus.service.method('fi.foobar.Foomuuri1.zone',
                         in_signature='ss', out_signature='s')
    def removeInterface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty."""
        return self.method_remove_interface(zone, interface)

    @dbus.service.method('fi.foobar.Foomuuri1.zone',
                         in_signature='ss', out_signature='s')
    def addInterface(self, zone, interface):
        """Add interface to zone. There can be only one zone per interface."""
        return self.method_add_interface(zone, interface)

    @dbus.service.method('fi.foobar.Foomuuri1.zone',
                         in_signature='ss', out_signature='s')
    def changeZoneOfInterface(self, zone, interface):
        """Change interface's zone."""
        return self.method_change_zone_of_interface(zone, interface)


class DbusFirewallD(dbus.service.Object, DbusCommon):
    """D-Bus server for FirewallD emulation.

    Exposes the same DbusCommon zone management as DbusFoomuuri, but on
    the "org.fedoraproject.FirewallD1.zone" interface so tools like
    NetworkManager written for firewalld keep working.
    """

    # pylint: disable=invalid-name  # dbus method names

    @dbus.service.method('org.fedoraproject.FirewallD1.zone',
                         in_signature='', out_signature='as')
    def getZones(self):
        """Get available zones. "localhost" can't have interfaces."""
        return self.method_get_zones()

    @dbus.service.method('org.fedoraproject.FirewallD1.zone',
                         in_signature='ss', out_signature='s')
    def removeInterface(self, zone, interface):
        """Remove interface from zone, or from all if zone is empty."""
        return self.method_remove_interface(zone, interface)

    @dbus.service.method('org.fedoraproject.FirewallD1.zone',
                         in_signature='ss', out_signature='s')
    def addInterface(self, zone, interface):
        """Add interface to zone. There can be only one zone per interface."""
        return self.method_add_interface(zone, interface)

    @dbus.service.method('org.fedoraproject.FirewallD1.zone',
                         in_signature='ss', out_signature='s')
    def changeZoneOfInterface(self, zone, interface):
        """Change interface's zone."""
        return self.method_change_zone_of_interface(zone, interface)


def command_dbus():
    """Start D-Bus daemon.

    Runs a GLib main loop serving the Foomuuri (and optionally
    FirewallD-emulation) D-Bus objects. SIGHUP re-reads the config and
    re-registers the objects; SIGINT stops the loop and exits.
    """
    CONFIG['keep_going'] = True
    while CONFIG['keep_going']:
        # Read minimal config
        config = minimal_config()
        zones = parse_config_zones(config)

        # Initialize D-Bus
        dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
        bus = dbus.SystemBus()

        # Foomuuri D-Bus calls
        try:
            foomuuri_name = dbus.service.BusName('fi.foobar.Foomuuri1', bus)
            foomuuri_name.get_name()  # Dummy call to get rid of pylint
        except dbus.exceptions.DBusException:
            fail('Can\'t bind to system D-Bus: fi.foobar.Foomuuri1')
        foomuuri_object = DbusFoomuuri(bus, '/fi/foobar/Foomuuri1')
        foomuuri_object.set_data(zones)

        # FirewallD emulation calls, if enabled in foomuuri{} config
        firewalld_object = None
        if CONFIG['dbus_firewalld'] == 'yes':
            try:
                firewalld_name = dbus.service.BusName(
                    'org.fedoraproject.FirewallD1', bus)
                firewalld_name.get_name()  # Dummy call to get rid of pylint
            except dbus.exceptions.DBusException:
                # Fix: error message had a typo "federaproject"
                fail('Can\'t bind to system D-Bus: '
                     'org.fedoraproject.FirewallD1')
            firewalld_object = DbusFirewallD(bus,
                                             '/org/fedoraproject/FirewallD1')
            firewalld_object.set_data(zones)

        # Define reload/stop signal handler
        mainloop = GLib.MainLoop()

        def signal_handler(sig, _dummy_frame):
            # SIGINT stops the outer while-loop, SIGHUP only restarts it
            if sig == signal.SIGINT:
                CONFIG['keep_going'] = False
                if HAVE_NOTIFY:
                    notify('STOPPING=1')
            elif HAVE_NOTIFY:
                notify('RELOADING=1')
            mainloop.quit()

        signal.signal(signal.SIGHUP, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        save_file(CONFIG['run_dir'] / 'foomuuri-dbus.pid', [str(os.getpid())])

        # Start processing messages
        print('D-Bus handler ready', flush=True)
        if HAVE_NOTIFY:
            notify('READY=1')
        mainloop.run()

        # Reload signal received. Disconnect from D-Bus so the objects
        # can be re-created with fresh config on the next iteration.
        foomuuri_object.remove_from_connection()
        del foomuuri_object
        del foomuuri_name
        if firewalld_object:
            firewalld_object.remove_from_connection()
            del firewalld_object
            del firewalld_name
        del bus


def resolve_one_hostname(hostname):
    """Resolve hostname and return set of its IP addresses as strings."""
    try:
        infos = socket.getaddrinfo(hostname, None)
    except socket.gaierror:
        # Resolution failure -> empty result, callers handle it
        return set()
    return {info[4][0] for info in infos if info[4] and info[4][0]}


def resolve_all_hostnames(resolve):
    """Return dict of hostname->iplist, resolving each name only once."""
    hosts = {}
    for hostname in itertools.chain.from_iterable(resolve.values()):
        if hostname not in hosts:  # dedup across all sets
            hosts[hostname] = resolve_one_hostname(hostname)
    return hosts


def active_sets():
    """Return names of sets present in the currently active firewall."""
    data = run_program_json([CONFIG['nft_bin'], '--json', 'list', 'ruleset'])
    if not data:
        return set()
    return {item['set']['name']
            for item in data['nftables']
            if 'set' in item and item['set']['table'] == 'foomuuri'}


def apply_resolve_iplist(filename, error_txt, pending_error):
    """Save generated resolve/iplist commands and load them into nft.

    filename is a CONFIG key. Returns nft's exit code if non-zero,
    otherwise the caller's pending_error.
    """
    if not OUT:
        return pending_error  # nothing was generated
    save_final(CONFIG[filename])
    status = run_program_rc([CONFIG['nft_bin'], '--file', CONFIG[filename]])
    if status:
        print(f'Error: Failed to update {error_txt}, error code {status}')
    return status or pending_error


def config_resolve_to_iplist():
    """Parse config's resolve{} and return "setname_4" -> iplist dict."""
    config = minimal_config()
    resolve = parse_resolve(config, 'resolve', '24h')
    timeout = resolve.pop('timeout')
    resolve.pop('refresh')
    hosts = resolve_all_hostnames(resolve)

    # Convert "@setname_4 -> hostlist" to "setname_4 -> iplist", keeping
    # only addresses whose IP version matches the set's _4/_6 suffix
    current = {}
    for setname, hostlist in resolve.items():
        ipv = int(setname[-1])
        matching = {ipaddr
                    for hostname in hostlist
                    for ipaddr in hosts[hostname]
                    if is_ip_address(ipaddr) == ipv}
        current[setname[1:]] = sorted(matching)  # strip leading "@"
    return current, timeout


def warn_resolve_iplist(setname, warned, error1, error2):
    """Print warning about a set once, remembering it in warned."""
    base = setname[:-2]  # strip "_4"/"_6" suffix
    if base in warned:
        return  # Already warned

    print(f'Warning: {error1} "@{base}" {error2}', flush=True)
    warned.add(base)


def command_resolve():
    """Resolve hostnames.

    Re-resolves every hostname in resolve{} and updates the matching
    nftables sets. Entries for sets missing from the active ruleset,
    or hostnames that failed to resolve, are written to the state file
    as comments so they survive to the next run. Returns 0 on success.
    """
    # Read config and resolve
    current, timeout = config_resolve_to_iplist()
    previous = read_previous_resolve_iplist('resolve_file')[0]
    known = active_sets()

    # Output entries
    warned = set()
    error = 0
    for setname, addresses in current.items():
        for address in addresses:
            # Is this set active in current ruleset? If not, add entry as
            # comment
            if setname not in known:
                comment = '# '
                warn_resolve_iplist(setname, warned, 'Resolve', 'does not '
                                    'exist in currently active firewall')
            else:
                # Success, update elements to ruleset. The add+delete
                # pair makes re-adding an existing element reset its
                # timeout instead of failing.
                comment = ''
                out(f'add element inet foomuuri {setname} {{ {address} }}')
                out(f'delete element inet foomuuri {setname} {{ {address} }}')
            out(f'{comment}add element inet foomuuri {setname} '
                f'{{ {address} timeout {timeout} }}')

        # All lookups failed. Add previous entries as comments.
        if not addresses and setname in previous:
            warn_resolve_iplist(setname, warned, 'Resolve',
                                'failed to resolve')
            error = 1
            for address in sorted(previous[setname]):
                out(f'# add element inet foomuuri {setname} '
                    f'{{ {address} timeout {timeout} }}')

    # Apply update
    return apply_resolve_iplist('resolve_file', 'resolve', error)


def get_url(url, setname):
    """Download URL and return its body as text, or None on failure.

    setname is used only for warning messages. requests exceptions
    derive from OSError, so a single except clause covers them.
    """
    try:
        response = requests.get(url, timeout=60)
    except OSError as error:
        print(f'Warning: Iplist "@{setname}", can\'t download {url}, '
              f'error: {error}')
        return None
    if response.status_code == 200:
        return response.text
    print(f'Warning: Iplist "@{setname}", can\'t download {url}, '
          f'status code: {response.status_code}')
    return None


def get_file(wildcard, setname):
    """Read all files matching wildcard and return them as single text.

    Each file's content is preceded by a newline separator. Returns
    None if no file matches or any matching file is unreadable;
    setname is used only for warning messages.
    """
    wildpath = pathlib.Path(wildcard)
    filenames = sorted(wildpath.parent.glob(wildpath.name))
    if not filenames:
        print(f'Warning: Iplist "@{setname}", can\'t read file '
              f'{wildcard}: No such file')
        return None
    text = ''
    for filename in filenames:
        try:
            content = filename.read_text(encoding='utf-8')
        except PermissionError as error:
            # Fix: report the actual filename instead of "(unknown)"
            print(f'Warning: Iplist "@{setname}", can\'t read file '
                  f'{filename}: {error}')
            return None
        text = text + '\n' + content
    return text


def parse_hour_min(timespec, fallback):
    """Parse 4h3m to seconds.

    This is a dummy parser without any good error checking; fallback is
    returned for empty or unparseable input.
    """
    if not timespec:
        return fallback
    remainder = timespec.replace(' ', '')

    # Split off the value before each unit marker, in h-then-m order
    values = {'h': 0, 'm': 0}
    for unit in ('h', 'm'):
        if unit in remainder:
            values[unit], remainder = remainder.split(unit, 1)
    if remainder:  # leftover text -> not a valid timespec
        return fallback
    try:
        return int(values['h']) * 3600 + int(values['m']) * 60
    except ValueError:
        return fallback


def get_all_urls_and_files(setname, namelist, last_refresh_time,
                           default_refresh):
    """Read all URLs and filename-wildcards and return them as text.

    Returns the concatenated content, the marker string '_refresh' if
    the refresh interval has not elapsed yet, or None if any single
    fetch failed. namelist may contain "refresh=..." items overriding
    default_refresh.
    """
    # Is it time to update this list?
    refresh = parse_hour_min(default_refresh, 0)
    for item in namelist:
        if item.startswith('refresh='):
            refresh = parse_hour_min(item[8:], refresh)
    if not last_refresh_time:
        time_left = 0  # never fetched before -> fetch now
    else:
        now = datetime.datetime.now(datetime.timezone.utc)
        time_left = int((last_refresh_time +
                         datetime.timedelta(seconds=refresh) -
                         now).total_seconds())
    if time_left > 0:
        verbose(f'Iplist "@{setname}" refresh skipped, {time_left} seconds '
                f'to next refresh')
        return '_refresh'  # Not yet

    # It is time. Fetch!
    text = ''
    for filename in namelist:
        if filename.startswith('refresh='):
            continue
        if filename.startswith(('https:', 'http:')):
            content = get_url(filename, setname)
        else:
            content = get_file(filename, setname)
        if content is None:
            return None  # Return None if any fails
        text = text + '\n' + content
    # Fix: log message was missing the closing quote after the set name
    verbose(f'Iplist "@{setname}" refreshed')
    return text


def read_previous_resolve_iplist(filename):
    """Read previous resolve/iplist state file and parse it to dicts.

    filename is a CONFIG key ('resolve_file' or 'iplist_file').
    Returns (setname -> set of addresses, setname -> last refresh
    datetime in UTC); both empty if the state file does not exist.
    """
    # Read state file, silently ignore if missing
    try:
        # pylint: disable=no-member
        content = CONFIG[filename].read_text(encoding='utf-8')
    except FileNotFoundError:
        return {}, {}
    except PermissionError as error:
        # Fix: report the file actually being read; this function also
        # serves resolve_file, not only iplist_file
        fail(f'File {CONFIG[filename]}: Can\'t read: {error}')

    # Parse lines to setname->set(ipaddrs). Address lines look like
    # "add element inet foomuuri <set> { <addr> timeout <t> }" and may
    # carry a leading "# " comment marker, which is stripped first.
    addrs = {}
    last_refresh = {}
    for line in content.splitlines():
        if line.startswith('# last_refresh'):
            try:
                last_refresh[line.split()[2]] = datetime.datetime.strptime(
                    line.split()[3], '%Y-%m-%dT%H:%M:%S').replace(
                        tzinfo=datetime.timezone.utc)
            except (IndexError, ValueError):
                pass
            continue
        tokens = line.replace('# ', '').split()
        if len(tokens) != 10 or tokens[7] != 'timeout':
            continue  # not an "add element ... timeout ..." line
        setname = tokens[4]
        if setname not in addrs:
            addrs[setname] = set()
        addrs[setname].add(tokens[6])
    return addrs, last_refresh


def read_all_iplists(iplist, last_refresh, default_refresh):
    """Read all iplist files and return dict setname_ipv->iplist.

    iplist maps "@name_4"/"@name_6" to filename/URL lists. For each
    list: fetch content, split it into IPv4/IPv6 address sets and
    return them under "name_4"/"name_6" keys. A set that was not due
    for refresh gets the marker value '_refresh'; a failed fetch or
    invalid content is omitted entirely. last_refresh is updated in
    place with the fetch timestamp of each successful refresh.
    """
    ret = {}
    for setname, filenames in iplist.items():
        # iplist has "name_6" and "name_4" with same content, ignore "_6"
        if setname.endswith('_6'):
            continue
        setname = setname[1:-2]  # Strip "@" and "_4"

        # Get content
        text = get_all_urls_and_files(setname, filenames,
                                      last_refresh.get(setname),
                                      default_refresh)
        if text is None:  # Fetch failure
            continue
        if text == '_refresh':  # It wasn't time to refresh
            ret[setname] = text
            continue

        # Parse each line to IPv4/IPv6 lists
        addr_4 = set()
        addr_6 = set()
        for line in text.splitlines():
            # Strip comments and empty lines
            if '#' in line:
                line = line.split('#')[0]
            line = line.strip()
            if not line:
                continue

            # Parse single item per line
            if is_ipv4_address(line):
                addr_4.add(line)
            elif is_ipv6_address(line):
                addr_6.add(line)
            else:
                print(f'Warning: Invalid content in iplist {{ @{setname} }}: '
                      f'{line}')
                break
        else:  # Include it to reply if all success, update its timestamp
            ret[f'{setname}_4'] = addr_4
            ret[f'{setname}_6'] = addr_6
            last_refresh[setname] = datetime.datetime.now(
                datetime.timezone.utc)
    return ret


def iterate_set_elements(data):
    """Yield (address, expires) pairs from "nft --json list set" output.

    Plain addresses are yielded as-is; prefix objects are formatted as
    "addr/len" strings. Accepts None/empty data and yields nothing.
    """
    for toplevel in (data or {}).get('nftables', []):
        for entry in toplevel.get('set', {}).get('elem', []):
            value = entry['elem']['val']
            expires = entry['elem']['expires']
            if isinstance(value, str):
                yield value, expires
            else:
                prefix = value['prefix']
                yield f'{prefix["addr"]}/{prefix["len"]}', expires


def iplist_manual_state(iplist):
    """Save manual iplist status to state file.

    A "manual" list is an iplist{} entry with no filenames/URLs. There
    is no locking so simultaneous add/del can result one missing/extra
    IP address. Next add/del will fix it.
    """
    lines = []
    known = active_sets()
    for setname, filenames in iplist.items():
        # Only manual lists: must start with "@" and have no filenames
        if filenames or not setname.startswith('@'):
            continue
        name = setname[1:]
        if name not in known:
            continue  # set not present in active ruleset

        # Get set entries
        data = run_program_json([CONFIG['nft_bin'], '--json', 'list', 'set',
                                 'inet', 'foomuuri', name])

        # Record each address with its remaining timeout; the trailing
        # comment holds the absolute expiry time for humans
        now = datetime.datetime.now(datetime.timezone.utc)
        for ipaddr, expire in iterate_set_elements(data):
            stamp = (now + datetime.timedelta(seconds=expire)).strftime(
                '%Y-%m-%dT%H:%M:%S')
            lines.append(f'add element inet foomuuri {name} '
                         f'{{ {ipaddr} timeout {expire}s }} # {stamp}')

    # Save manual list
    save_file(CONFIG['iplist_manual_file'], lines)


def command_iplist_list():
    """List iplist entries.

    Lists the sets named on the command line, or every set declared in
    iplist{} and resolve{} config sections. Returns 0 if any address
    was printed, 1 otherwise.
    """
    config = minimal_config()
    # Collect known "@name" entries, stripping the "_4"/"_6" suffix
    known = set()
    for item in (
            list(parse_resolve(config, 'iplist')) +
            list(parse_resolve(config, 'resolve'))
    ):
        if item.startswith('@'):
            known.add(item[:-2])

    ret = 1  # Default to no output, failure
    for setname in CONFIG['parameters'][1:] or sorted(known):
        if setname.startswith('@'):
            setname = setname[1:]
        # Explicit "_4"/"_6" suffix lists only that set, otherwise
        # query both address families
        if setname.endswith(('_4', '_6')):
            data4 = run_program_json([CONFIG['nft_bin'], '--json', 'list',
                                      'set', 'inet', 'foomuuri', setname])
            data6 = {}
        else:
            data4 = run_program_json([CONFIG['nft_bin'], '--json', 'list',
                                      'set', 'inet', 'foomuuri',
                                      f'{setname}_4'])
            data6 = run_program_json([CONFIG['nft_bin'], '--json', 'list',
                                      'set', 'inet', 'foomuuri',
                                      f'{setname}_6'])
        elem4 = [ipaddr for ipaddr, _dummy in iterate_set_elements(data4)]
        elem6 = [ipaddr for ipaddr, _dummy in iterate_set_elements(data6)]
        elems = sorted(elem4) + sorted(elem6)
        if not elems:
            print(f'@{setname}')
        else:
            ret = 0  # Outputted something, return ok
            for elem in elems:
                print(f'@{setname:20s}  {elem}')
    return ret


def command_iplist_add():
    """Add entries to iplist.

    Parameters: "add" setname address... A non-address parameter ending
    in s/m/h/d (e.g. "30m") sets the timeout for the addresses that
    follow it. Returns sum of nft exit codes (0 = all ok).
    """
    config = minimal_config()
    iplist = parse_resolve(config, 'iplist', '10d')
    timeout = iplist.pop('timeout')
    setname = CONFIG['parameters'][1]
    if setname.startswith('@'):
        setname = setname[1:]

    ret = 0
    for address in CONFIG['parameters'][2:]:
        ipv = is_ip_address(address)
        if not ipv:
            # Not an address: maybe an inline timeout value like "30m"
            if address[-1] in 'smhd':
                timeout = address.replace(' ', '')
                continue
            fail(f'Invalid IP address {address}')
        table_set = f'inet foomuuri {setname}_{ipv}'
        # add+delete first so re-adding an existing element resets its
        # timeout instead of failing
        ret += run_program_rc([CONFIG['nft_bin'],
                               f'add element {table_set} {{ {address} }}; '
                               f'delete element {table_set} {{ {address} }}; '
                               f'add element {table_set} {{ {address} '
                               f'timeout {timeout} }};'])
    iplist_manual_state(iplist)
    return ret


def command_iplist_del():
    """Delete entries from iplist.

    Parameters: "del" setname address... Returns sum of nft exit codes.
    """
    config = minimal_config()
    iplist = parse_resolve(config, 'iplist')
    setname = CONFIG['parameters'][1]
    if setname.startswith('@'):
        setname = setname[1:]

    errors = 0
    for address in CONFIG['parameters'][2:]:
        ipv = is_ip_address(address)
        if not ipv:
            fail(f'Invalid IP address {address}')
        # add-then-delete so deleting a non-existing element succeeds
        table_set = f'inet foomuuri {setname}_{ipv}'
        errors += run_program_rc([CONFIG['nft_bin'],
                                  f'add element {table_set} {{ {address} }}; '
                                  f'delete element {table_set} '
                                  f'{{ {address} }};'])
    iplist_manual_state(iplist)
    return errors


def command_iplist_refresh():
    """Refresh iplist{} entries.

    Re-fetches iplist sources that are due for refresh (or forced via
    command line parameters), rebuilds the matching nftables sets, and
    writes the combined state (timestamps + addresses, failed or
    inactive sets as comments) to iplist_file. Returns 0 on success.
    """
    # pylint: disable=too-many-locals

    # Read minimal config and read iplist{} entries
    config = minimal_config()
    iplist = parse_resolve(config, 'iplist', '10d', '24h')
    timeout = iplist.pop('timeout')
    refresh = iplist.pop('refresh')
    previous, last_refresh = read_previous_resolve_iplist('iplist_file')
    for param in CONFIG['parameters'][1:]:  # Refresh parameters now
        if param.startswith('@'):
            param = param[1:]
        last_refresh[param] = None
    current = read_all_iplists(iplist, last_refresh, refresh)
    known = active_sets()

    # Output last_refresh entries (only for sets still in config)
    for setname, timestamp in last_refresh.items():
        if f'@{setname}_4' in iplist and timestamp:
            out(f'# last_refresh {setname} {timestamp:%Y-%m-%dT%H:%M:%S}')

    # Output address entries
    warned = set()
    error = 0
    for setname, filenames in iplist.items():
        if not filenames:
            continue  # Don't touch sets without filenames

        setname = setname[1:]  # Strip "@"
        if current.get(setname[:-2]) == '_refresh':
            # It wasn't time to refresh
            comment = '# '
            addrlist = previous.get(setname, [])
        elif setname not in current:
            # Fetch for this set failed. Add previous fetch as comments
            error = 1
            comment = '# '
            addrlist = previous.get(setname, [])
            warn_resolve_iplist(setname, warned, 'Iplist', 'failed to refresh')
        elif setname not in known:
            # Not active in current ruleset, add items as comments
            comment = '# '
            addrlist = current[setname]
            warn_resolve_iplist(setname, warned, 'Iplist',
                                'does not exist in currently active firewall')
        else:
            # Success, update elements to ruleset.
            comment = ''
            out(f'flush set inet foomuuri {setname}')
            addrlist = current[setname]

        for ipaddr in sorted(addrlist):
            out(f'{comment}add element inet foomuuri {setname} '
                f'{{ {ipaddr} timeout {timeout} }}')

    # Apply update
    return apply_resolve_iplist('iplist_file', 'iplist', error)


def command_iplist():
    """Parse "foomuuri iplist" subcommand."""
    params = CONFIG['parameters']
    if params:
        subcommand = params[0]
        # "list" and "refresh" work with any number of extra arguments
        if subcommand == 'list':
            return command_iplist_list()
        if subcommand == 'refresh':
            return command_iplist_refresh()
        # "add" and "del" need at least a name and one address
        if len(params) >= 3:
            if subcommand == 'add':
                return command_iplist_add()
            if subcommand == 'del':
                return command_iplist_del()
    return command_help()


def command_list():
    """List currently active ruleset."""
    # Without arguments dump the whole active ruleset
    if not CONFIG['parameters']:
        return run_program_rc([CONFIG['nft_bin'], 'list', 'ruleset'])

    # Build the set of every possible zone-zone pair name
    zones = parse_config_zones(minimal_config())
    zonepairs = {f'{szone}-{dzone}' for szone in zones for dzone in zones}

    # List rules for single zone-zone, known macros, etc
    ret = 0
    for zone in CONFIG['parameters']:
        if zone == 'macro':
            # List macros. config{} doesn't have macro{} anymore so config
            # must be re-read.
            macros = parse_config_macros(read_config())
            print('macro {')
            for macro in sorted(macros):
                print(f'  {macro:15s} {" ".join(macros[macro])}')
            print('}')
        elif zone == 'counter':
            # List named counters
            ret += run_program_rc([CONFIG['nft_bin'], 'list', 'counters',
                                   'table', 'inet', 'foomuuri'])
        elif zone not in zonepairs:
            fail(f'Unknown zone-zone: {zone}')
        else:
            # List IPv4 and IPv6 chains of this zone-zone pair
            for ipv in (4, 6):
                ret += run_program_rc([CONFIG['nft_bin'], 'list', 'chain',
                                       'inet', 'foomuuri', f'{zone}_{ipv}'])
    return ret


def command_reload():
    """Run start and refresh resolve and iplist."""
    # Re-use any "--" options from our own command line
    args = [sys.argv[0]] + [arg for arg in sys.argv[1:]
                            if arg.startswith('--')]

    # "start" failure is fatal, refresh failures are ignored
    subcommands = (
        (['start'], True),
        (['resolve', 'refresh'], False),
        (['iplist', 'refresh'], False),
    )
    for sub, fatal in subcommands:
        ret = run_program_rc(args + sub)
        if fatal and ret:
            return ret
    return 0


def seconds_to_human(seconds):
    """Convert seconds int to human readable "19 days, 18:37" format."""
    days, remainder = divmod(seconds, 86400)
    hours, remainder = divmod(remainder, 3600)
    minutes, secs = divmod(remainder, 60)
    clock = f'{hours:02d}:{minutes:02d}:{secs:02d}'
    # Day count is shown only when non-zero
    return f'{days} days, {clock}' if days else clock


def monitor_state_command(targets, groups, cfg, grouptarget, name):
    """Run command if group/target state changes.

    Logs the state change of one target or group, records the change time
    in cfg['state_time'], and runs the configured command_up/command_down
    with the change details and all current states in the environment.
    grouptarget is 'target' or 'group'; name is the target/group name.
    """
    # pylint: disable=too-many-locals

    # Log state change
    updown = 'up' if cfg['state'] else 'down'
    now = time.time()
    prev = cfg.get('state_time')
    history = None
    if prev:
        # Not the first event: report time since the previous change
        seconds = int(now - prev + 0.5)
        extra = f'previous change was {seconds_to_human(seconds)} ago'
        if grouptarget == 'target':
            # Count trailing history samples that match the new state.
            # NOTE(review): if every sample matches, cons ends at
            # history_size - 1 instead of history_size — looks like an
            # off-by-one in the reported count, confirm intent.
            for cons in range(0, cfg['history_size']):
                if cfg['history'][-1 - cons] != cfg['state']:
                    break
            historycount = cfg['history'].count(cfg['state'])
            extra = (f'{extra}, consecutive_{updown} {cons}, '
                     f'history_{updown} {historycount}')
            # History string: '.' = sample was up, '!' = sample was down
            history = ''.join('.' if item else '!' for item in cfg['history'])
    else:
        extra = 'startup change'
    print(f'{grouptarget} {name} changed state to {updown}, {extra}',
          flush=True)
    if history:
        print(f'{grouptarget} {name} history: {history}', flush=True)
    cfg['state_time'] = now

    # Run external command if configured. It will receive current state change
    # event info and all states in environment variables.
    env = {
        # Change state event
        'FOOMUURI_CHANGE_TYPE': grouptarget,
        'FOOMUURI_CHANGE_NAME': env_cleanup(name),
        'FOOMUURI_CHANGE_STATE': updown,
        'FOOMUURI_CHANGE_LOG': extra,
        'FOOMUURI_CHANGE_HISTORY': history or '',
        # List of configured targets
        'FOOMUURI_ALL_TARGET': ' '.join(env_cleanup(item) for item in targets),
        # List of configured groups
        'FOOMUURI_ALL_GROUP': ' '.join(env_cleanup(item) for item in groups),
    }
    # Per-target and per-group current states; groups without a decided
    # state yet (startup) are reported as "up"
    for target, icfg in targets.items():
        env[f'FOOMUURI_TARGET_{env_cleanup(target)}'] = (
            'up' if icfg['state'] else 'down')
    for group, icfg in groups.items():
        env[f'FOOMUURI_GROUP_{env_cleanup(group)}'] = (
            'up' if icfg.get('state', True) else 'down')
    run_program_rc(cfg[f'command_{updown}'], env=env)


def monitor_update_groups(targets, groups):
    """Update all group statuses.

    On startup make decision and send event for all groups after first reply
    from any target.
    """
    for name, cfg in groups.items():
        # A group is up when any of its member targets is up
        new_state = any(targets[item]['state'] for item in cfg['target'])
        # On startup 'state' is missing; default to the opposite of the
        # computed state so a change event is always generated once
        old_state = cfg.get('state', not new_state)
        if new_state != old_state:
            cfg['state'] = new_state
            monitor_state_command(targets, groups, cfg, 'group', name)


def monitor_update_target(targets, groups, target, state):
    """Add state to target's history and change its state."""
    cfg = targets[target]

    # First reply ever: pre-fill history with ups and force a change event
    startup_change = 'history' not in cfg
    if startup_change:
        cfg['history'] = [True] * cfg['history_size']

    # Shift the new sample into the end of the history window
    cfg['history'] = cfg['history'][1:] + [state]

    # State can't change if the new sample equals the current state
    if state == cfg['state'] and not startup_change:
        return

    ups = sum(cfg['history'])
    if cfg['state']:
        # Currently up. Target goes down if history has too many downs OR
        # the last consecutive_down samples were all down.
        downs = cfg['history_size'] - ups
        if (downs >= cfg['history_down'] or
                not any(cfg['history'][-cfg['consecutive_down']:])):
            cfg['state'] = False
            monitor_state_command(targets, groups, cfg, 'target', target)
        elif startup_change:
            # Always send an event on startup
            monitor_state_command(targets, groups, cfg, 'target', target)
    else:
        # Currently down. Target goes up if history has enough ups AND
        # the last consecutive_up samples were all up.
        if (ups >= cfg['history_up'] and
                all(cfg['history'][-cfg['consecutive_up']:])):
            cfg['state'] = True
            monitor_state_command(targets, groups, cfg, 'target', target)


def monitor_parse_ping(targets, groups, target, line):
    """Parse fping's result line."""
    found = re.match(r'^[^ ]+ : \[\d+\], (.+)$', line)
    if not found:
        return
    reply = found.group(1)
    # A successful reply contains both a byte count and a round trip time
    alive = ' bytes, ' in reply and ' ms (' in reply
    monitor_update_target(targets, groups, target, alive)


def monitor_start_targets(targets):
    """Start pinging all targets."""
    for cfg in targets.values():
        command = list(cfg['command'])
        # fping requires --loop, add it if missing
        if 'fping' in command[0] and '--loop' not in command:
            command.insert(1, '--loop')

        # Assume it is up on startup
        cfg['state'] = True

        # Start command (pylint: disable=consider-using-with)
        cfg['proc'] = subprocess.Popen(command,
                                       stdin=subprocess.DEVNULL,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT,
                                       encoding='utf-8')

    # Small initial sleep so that first read_target() will receive more
    # replies in one go. This helps to get more "target up" events before
    # "group up" events.
    time.sleep(0.5)


def monitor_read_targets(targets, groups):
    """Read incoming lines from all targets."""
    # Wait up to 10 seconds for output from any fping subprocess
    pipes = [cfg['proc'].stdout for cfg in targets.values()]
    ready = select.select(pipes, [], [], 10.0)[0]

    # Feed one line from every readable pipe to the ping parser
    for pipe in ready:
        for target, cfg in targets.items():
            if cfg['proc'].stdout == pipe:
                monitor_parse_ping(targets, groups, target, pipe.readline())
                break

    # Update all groups
    monitor_update_groups(targets, groups)


def monitor_terminate_targets(targets):
    """Terminate all subprocesses."""
    # Signal every subprocess first, then collect them, so slow exits
    # overlap instead of being waited for one by one
    processes = [cfg['proc'] for cfg in targets.values()]
    for proc in processes:
        proc.terminate()
    for proc in processes:
        proc.wait(timeout=0.1)


def parse_config_targets(config):
    """Parse "target foo { ... }" entries from config."""
    targets = {}
    for section in [key for key in config if key.startswith('target ')]:
        name = section[7:]  # Strip "target " prefix
        cfg = targets[name] = {
            'command': [],
            'command_up': [],
            'command_down': [],
            'consecutive_up': 20,    # last n were UP        => UP
            'consecutive_down': 10,  # last n were DOWN      => DOWN
            'history_up': 80,        # count of UPs => n     => UP
            'history_down': 30,      # count of DOWNs >= n   => DOWN
            'history_size': 100,
            # Internal items:
            # 'state': True,
            # 'state_time': time(),
            # 'history': [True] * history_size,
            # 'proc': Popen()
        }
        fileline = ''
        for fileline, line in config.pop(section):
            keyword = line[0]
            if keyword in ('command', 'command_up', 'command_down'):
                # Command keywords take the rest of the line as-is
                cfg[keyword] = line[1:]
                continue
            # Everything else is "keyword integer"
            if len(line) != 2:
                fail(f'{fileline}Can\'t parse line: {" ".join(line)}')
            if keyword not in cfg:
                fail(f'{fileline}Unknown keyword: {" ".join(line)}')
            try:
                cfg[keyword] = int(line[1])
            except ValueError:
                fail(f'{fileline}Invalid value: {" ".join(line)}')

        # Verify section
        if not cfg['command']:
            fail(f'{fileline}Missing "command" keyword in "target {name}"')
        for key in ('consecutive_up', 'consecutive_down',
                    'history_up', 'history_down'):
            if cfg[key] > cfg['history_size']:
                fail(f'{fileline}{key} is larger than history_size in '
                     f'"target {name}": {cfg[key]} > {cfg["history_size"]}')
        if cfg['history_size'] - cfg['history_up'] >= cfg['history_down']:
            print(f'{fileline}Warning: Possible up-down loop in '
                  f'"target {name}": history_size {cfg["history_size"]} - '
                  f'history_up {cfg["history_up"]} >= '
                  f'history_down {cfg["history_down"]}', flush=True)

    return targets


def parse_config_groups(config, targets):
    """Parse "group foo { ... }" entries from config."""
    groups = {}
    for section in [key for key in config if key.startswith('group ')]:
        name = section[6:]  # Strip "group " prefix
        cfg = groups[name] = {
            'target': [],
            'command_up': [],
            'command_down': [],
        }
        fileline = ''
        for fileline, line in config.pop(section):
            if line[0] not in cfg:
                fail(f'{fileline}Unknown keyword: {" ".join(line)}')
            cfg[line[0]] = line[1:]

        # Verify section
        members = cfg.get('target')
        if not members:
            fail(f'{fileline}Missing "target" keyword in "group {name}"')
        for item in members:
            if item not in targets:
                fail(f'{fileline}Undefined target "{item}" in "group {name}"')

    return groups


def command_monitor():
    """Monitor targets and run command when their state changes up or down.

    CONFIG['keep_going'] values: 1 = keep monitoring, 0 = stop (SIGINT),
    2 = reload (other signals, e.g. SIGHUP) which restarts the outer loop
    and re-reads the configuration.
    """
    CONFIG['keep_going'] = 1
    while CONFIG['keep_going']:
        # Read minimal config
        config = minimal_config()
        targets = parse_config_targets(config)
        groups = parse_config_groups(config, targets)

        # Exit silently with OK if no targets defined in configuration
        if not targets:
            if HAVE_NOTIFY:
                notify('READY=1')
                notify('STOPPING=1')
            return 0

        # Define reload/stop signal handler
        def signal_handler(sig, _dummy_frame):
            if sig == signal.SIGINT:
                CONFIG['keep_going'] = 0
                if HAVE_NOTIFY:
                    notify('STOPPING=1')
            else:
                CONFIG['keep_going'] = 2
                if HAVE_NOTIFY:
                    notify('RELOADING=1')

        CONFIG['keep_going'] = 1
        signal.signal(signal.SIGHUP, signal_handler)
        signal.signal(signal.SIGINT, signal_handler)
        # Publish our pid so others can signal us (reload/stop)
        save_file(CONFIG['run_dir'] / 'foomuuri-monitor.pid',
                  [str(os.getpid())])

        # Start monitoring; loop until a signal requests stop or reload
        monitor_start_targets(targets)
        print('Target monitor ready', flush=True)
        if HAVE_NOTIFY:
            notify('READY=1')
        while CONFIG['keep_going'] == 1:
            monitor_read_targets(targets, groups)
        # Kill subprocesses; on reload (keep_going == 2) the outer loop
        # re-reads config and starts them again
        monitor_terminate_targets(targets)

    return 0


def command_help():
    """Print command line help."""
    lines = [
        f'Foomuuri {VERSION}',
        '',
        f'Usage: {sys.argv[0]} {{options}} command',
        '',
        'Available commands:',
        '',
        '  start            Load configuration files and generate ruleset',
        '  start-or-good    Same as start with fallback to previous "good"',
        '  stop             Remove ruleset',
        '  reload           Same as start, followed by resolve+iplist '
        'refresh',
        '  check            Verify configuration files',
        '  list             List active ruleset',
        '  list zone-zone {zone-zone...}',
        '                   List active ruleset for zone-zone',
        '  list macro       List all known macros',
        '  list counter     List all named counters',
        '  iplist list      List entries in all configured iplists and '
        'resolves',
        '  iplist list name {name...}',
        '                   List entries in named iplist/resolve',
        '  iplist add name {timeout} ipaddress {ipaddress...}',
        '                   Add or refresh IP address to iplist',
        '  iplist del name ipaddress {ipaddress...}',
        '                   Delete IP address from iplist',
        '  iplist refresh name {name...}',
        '                   Refresh iplist @name now',
        '',
        'Available options:',
        '',
        '  --verbose        Verbose output',
        '  --version        Print version',
        '  --set=option=value',
        '                   Set config option to value',
        '',
        'Internally used commands:',
        '',
        '  resolve refresh  Refresh resolve hostnames',
        '  iplist refresh   Refresh iplist entries',
        '  dbus             Start D-Bus daemon',
        '  monitor          Start target monitor daemon',
    ]
    print('\n'.join(lines))
    return 0


def parse_command_line():
    """Parse command line to CONFIG[command] and CONFIG[parameters]."""
    for arg in sys.argv[1:]:
        if arg == '--help':
            CONFIG['command'] = 'help'
        elif arg == '--verbose':
            CONFIG['verbose'] += 1
        elif arg == '--version':
            print(VERSION)
            sys.exit(0)
        elif arg == '--devel':
            # Development mode: use local directories instead of system ones
            CONFIG['devel'] = True
            CONFIG.update({
                'etc_dir': '../foomuuri-devel',
                'share_dir': 'not-used-as-etc-finds-these',
                'state_dir': '../foomuuri-devel',
                'run_dir': '../foomuuri-devel',
            })
        elif arg.startswith('--set='):
            # Expected form is --set=option=value
            assignment = arg.partition('=')[2]
            option, sep, value = assignment.partition('=')
            if not sep:
                fail(f'Invalid syntax for --set=option=value: {arg}')
            if option not in CONFIG:
                fail(f'Unknown foomuuri{{}} option: {arg}')
            CONFIG[option] = value
        elif not CONFIG['command']:
            # First bare word is the command ...
            CONFIG['command'] = arg
        else:
            # ... and the rest are its parameters
            CONFIG['parameters'].append(arg)
    if not CONFIG['command']:
        CONFIG['command'] = 'help'
    config_to_pathlib()


def run_command():
    """Run CONFIG[command]."""
    command = CONFIG['command']

    # Only some commands can take arguments
    if CONFIG['parameters'] and command not in (
            'list', 'resolve', 'iplist', 'help'):
        return command_help()

    # Help doesn't need root
    if command == 'help':
        return command_help()

    # Warning if not running as root
    if os.getuid() and not CONFIG['devel']:
        print('Warning: Foomuuri should be run as "root"', flush=True)

    # Run simple commands without ruleset output
    simple = {
        'reload': command_reload,
        'list': command_list,
        'resolve': command_resolve,
        'iplist': command_iplist,
        'dbus': command_dbus,
        'monitor': command_monitor,
    }
    if command in simple:
        return simple[command]()

    # Run commands with ruleset output
    if command in ('start', 'start-or-good', 'check'):
        command_start()
    elif command == 'stop':
        command_stop()
    else:
        fail(f'Unknown command: {CONFIG["command"]}')

    # Save and apply changes
    save_final(CONFIG['next_file'])
    return apply_final()


def main():
    """Parse command line and run command.

    Returns the process exit status from the executed subcommand.
    """
    parse_command_line()
    return run_command()


# Entry point: exit status comes from main(); BrokenPipeError (e.g. output
# piped to "head") is turned into a clean exit with status 1
if __name__ == '__main__':
    try:
        sys.exit(main())
    except BrokenPipeError:
        # Python flushes standard streams on exit; redirect remaining output
        # to devnull to avoid another BrokenPipeError at shutdown
        os.dup2(os.open(os.devnull, os.O_WRONLY), sys.stdout.fileno())
        sys.exit(1)
