diff --git a/checkmk/checkmk-files/logwatch b/checkmk/checkmk-files/logwatch
deleted file mode 100644
index 0c93768f35a3748099db9fb3034b546ead882c82..0000000000000000000000000000000000000000
--- a/checkmk/checkmk-files/logwatch
+++ /dev/null
@@ -1,430 +0,0 @@
-#!/usr/bin/python
-# -*- encoding: utf-8; py-indent-offset: 4 -*-
-# +------------------------------------------------------------------+
-# |             ____ _               _        __  __ _  __           |
-# |            / ___| |__   ___  ___| | __   |  \/  | |/ /           |
-# |           | |   | '_ \ / _ \/ __| |/ /   | |\/| | ' /            |
-# |           | |___| | | |  __/ (__|   <    | |  | | . \            |
-# |            \____|_| |_|\___|\___|_|\_\___|_|  |_|_|\_\           |
-# |                                                                  |
-# | Copyright Mathias Kettner 2014             mk@mathias-kettner.de |
-# +------------------------------------------------------------------+
-#
-# This file is part of Check_MK.
-# The official homepage is at http://mathias-kettner.de/check_mk.
-#
-# check_mk is free software;  you can redistribute it and/or modify it
-# under the  terms of the  GNU General Public License  as published by
-# the Free Software Foundation in version 2.  check_mk is  distributed
-# in the hope that it will be useful, but WITHOUT ANY WARRANTY;  with-
-# out even the implied warranty of  MERCHANTABILITY  or  FITNESS FOR A
-# PARTICULAR PURPOSE. See the  GNU General Public License for more de-
-# tails. You should have  received  a copy of the  GNU  General Public
-# License along with GNU Make; see the file  COPYING.  If  not,  write
-# to the Free Software Foundation, Inc., 51 Franklin St,  Fifth Floor,
-# Boston, MA 02110-1301 USA.
-
-# Call with -d for debug mode: colored output, no saving of status
-
-import sys, os, re, time, glob
-
-if '-d' in sys.argv[1:] or '--debug' in sys.argv[1:]:
-    tty_red     = '\033[1;31m'
-    tty_green   = '\033[1;32m'
-    tty_yellow  = '\033[1;33m'
-    tty_blue    = '\033[1;34m'
-    tty_normal  = '\033[0m'
-    debug = True
-else:
-    tty_red     = ''
-    tty_green   = ''
-    tty_yellow  = ''
-    tty_blue    = ''
-    tty_normal  = ''
-    debug = False
-
-# The configuration file and status file are searched
-# in the directory named by the environment variable
-# LOGWATCH_DIR. If that is not set, MK_CONFDIR is used.
-# If that is not set either, the current directory ist
-# used.
-logwatch_dir = os.getenv("LOGWATCH_DIR")
-if logwatch_dir:
-    mk_confdir = logwatch_dir
-    mk_vardir = logwatch_dir
-else:
-    mk_confdir = os.getenv("MK_CONFDIR") or "."
-    mk_vardir = os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."
-
-
-sys.stdout.write("<<<logwatch>>>\n")
-
-config_filename = mk_confdir + "/logwatch.cfg"
-config_dir      = mk_confdir + "/logwatch.d/*.cfg"
-
-
-# Determine the name of the state file
-# $REMOTE set                   -> logwatch.state.$REMOTE
-# $REMOTE not set and a tty     -> logwatch.state.local
-# $REMOTE not set and not a tty -> logwatch.state
-remote_hostname = os.getenv("REMOTE", "")
-remote_hostname = remote_hostname.replace(":", "_")
-if remote_hostname != "":
-    status_filename = "%s/logwatch.state.%s" % (mk_vardir, remote_hostname)
-else:
-    if sys.stdout.isatty():
-        status_filename = "%s/logwatch.state.local" % mk_vardir
-    else:
-        status_filename = "%s/logwatch.state" % mk_vardir
-
-# Copy the last known state from the logwatch.state when there is no status_filename yet.
-if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % mk_vardir):
-    import shutil
-    shutil.copy("%s/logwatch.state" % mk_vardir, status_filename)
-
-def is_not_comment(line):
-    if line.lstrip().startswith('#') or \
-       line.strip() == '':
-        return False
-    return True
-
-def parse_filenames(line):
-    return line.split()
-
-def parse_pattern(level, pattern, line):
-    if level not in [ 'C', 'W', 'I', 'O' ]:
-        raise Exception("Invalid pattern line '%s'" % line)
-
-    try:
-        compiled = re.compile(pattern)
-    except:
-        raise Exception("Invalid regular expression in line '%s'" % line)
-
-    return (level, compiled)
-
-def read_config():
-    config_lines = [ line.rstrip() for line in filter(is_not_comment, file(config_filename).readlines()) ]
-    # Add config from a logwatch.d folder
-    for config_file in glob.glob(config_dir):
-        config_lines += [ line.rstrip() for line in filter(is_not_comment, file(config_file).readlines()) ]
-
-    have_filenames = False
-    config = []
-    cont_list = []
-    rewrite_list = []
-
-    for line in config_lines:
-        if line[0].isspace(): # pattern line
-            if not have_filenames:
-                raise Exception("Missing logfile names")
-
-            level, pattern = line.split(None, 1)
-
-            if level == 'A':
-                cont_list.append(parse_cont_pattern(pattern))
-            elif level == 'R':
-                rewrite_list.append(pattern)
-            else:
-                level, compiled = parse_pattern(level, pattern, line)
-                # New pattern for line matching => clear continuation and rewrite patterns
-                cont_list = []
-                rewrite_list = []
-                patterns.append((level, compiled, cont_list, rewrite_list))
-
-        else: # filename line
-            patterns = []
-            cont_list = [] # Clear list of continuation patterns from last file
-            rewrite_list = [] # Same for rewrite patterns
-            config.append((parse_filenames(line), patterns))
-            have_filenames = True
-    return config
-
-def parse_cont_pattern(pattern):
-    try:
-        return int(pattern)
-    except:
-        try:
-            return re.compile(pattern)
-        except:
-            if debug:
-                raise
-            raise Exception("Invalid regular expression in line '%s'" % pattern)
-
-# structure of statusfile
-# # LOGFILE         OFFSET    INODE
-# /var/log/messages|7767698|32455445
-# /var/test/x12134.log|12345|32444355
-def read_status():
-    if debug:
-        return {}
-
-    status = {}
-    for line in file(status_filename):
-        # TODO: Remove variants with spaces. rsplit is
-        # not portable. split fails if logfilename contains
-        # spaces
-        inode = -1
-        try:
-            parts = line.split('|')
-            filename = parts[0]
-            offset = parts[1]
-            if len(parts) >= 3:
-                inode = parts[2]
-
-        except:
-            try:
-                filename, offset = line.rsplit(None, 1)
-            except:
-                filename, offset = line.split(None, 1)
-        status[filename] = int(offset), int(inode)
-    return status
-
-def save_status(status):
-    f = file(status_filename, "w")
-    for filename, (offset, inode) in status.items():
-        f.write("%s|%d|%d\n" % (filename, offset, inode))
-
-pushed_back_line = None
-def next_line(file_handle):
-    global pushed_back_line
-    if pushed_back_line != None:
-        line = pushed_back_line
-        pushed_back_line = None
-        return line
-    else:
-        try:
-            line = file_handle.next()
-            # Avoid parsing of (yet) incomplete lines (when acutal application
-            # is just in the process of writing)
-            if not line.endswith(os.linesep):
-                begin_of_line_offset = file_handle.tell() - len(line)
-                os.lseek(file_handle.fileno(), begin_of_line_offset, 0)
-                return None
-            return line
-        except:
-            return None
-
-
-def process_logfile(logfile, patterns):
-    global pushed_back_line
-
-    # Look at which file offset we have finished scanning
-    # the logfile last time. If we have never seen this file
-    # before, we set the offset to -1
-    offset, prev_inode = status.get(logfile, (-1, -1))
-    try:
-        file_desc = os.open(logfile, os.O_RDONLY)
-        inode = os.fstat(file_desc)[1] # 1 = st_ino
-    except:
-        if debug:
-            raise
-        sys.stdout.write("[[[%s:cannotopen]]]\n" % logfile)
-        return
-
-    sys.stdout.write("[[[%s]]]\n" % logfile)
-
-    # Seek to the current end in order to determine file size
-    current_end = os.lseek(file_desc, 0, 2) # os.SEEK_END not available in Python 2.4
-    status[logfile] = current_end, inode
-
-    # If we have never seen this file before, we just set the
-    # current pointer to the file end. We do not want to make
-    # a fuss about ancient log messages...
-    if offset == -1:
-	if not debug:
-            return
-    	else:
-	    offset = 0
-
-
-    # If the inode of the logfile has changed it has appearently
-    # been started from new (logfile rotation). At least we must
-    # assume that. In some rare cases (restore of a backup, etc)
-    # we are wrong and resend old log messages
-    if prev_inode >= 0 and inode != prev_inode:
-        offset = 0
-
-    # Our previously stored offset is the current end ->
-    # no new lines in this file
-    if offset == current_end:
-        return # nothing new
-
-    # If our offset is beyond the current end, the logfile has been
-    # truncated or wrapped while keeping the same inode. We assume
-    # that it contains all new data in that case and restart from
-    # offset 0.
-    if offset > current_end:
-        offset = 0
-
-    # now seek to offset where interesting data begins
-    os.lseek(file_desc, offset, 0) # os.SEEK_SET not available in Python 2.4
-    file_handle = os.fdopen(file_desc)
-    worst = -1
-    outputtxt = ""
-    lines_parsed = 0
-    start_time = time.time()
-
-    while True:
-        line = next_line(file_handle)
-        if line == None:
-            break # End of file
-
-        # Handle option maxlinesize
-        if opt_maxlinesize != None and len(line) > opt_maxlinesize:
-            line = line[:opt_maxlinesize] + "[TRUNCATED]\n"
-
-        lines_parsed += 1
-        # Check if maximum number of new log messages is exceeded
-        if opt_maxlines != None and lines_parsed > opt_maxlines:
-            outputtxt += "%s Maximum number (%d) of new log messages exceeded.\n" % (
-               opt_overflow, opt_maxlines)
-            worst = max(worst, opt_overflow_level)
-            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
-            break
-
-        # Check if maximum processing time (per file) is exceeded. Check only
-        # every 100'th line in order to save system calls
-        if opt_maxtime != None and lines_parsed % 100 == 10 \
-            and time.time() - start_time > opt_maxtime:
-            outputtxt += "%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
-               opt_overflow, opt_maxtime)
-            worst = max(worst, opt_overflow_level)
-            os.lseek(file_desc, 0, 2) # Seek to end of file, skip all other messages
-            break
-
-        level = "."
-        for lev, pattern, cont_patterns, replacements in patterns:
-            matches = pattern.search(line[:-1])
-            if matches:
-                level = lev
-                levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
-                worst = max(levelint, worst)
-
-                # Check for continuation lines
-                for cont_pattern in cont_patterns:
-                    if type(cont_pattern) == int: # add that many lines
-                        for _unused_x in range(cont_pattern):
-                            cont_line = next_line(file_handle)
-                            if cont_line == None: # end of file
-                                break
-                            line = line[:-1] + "\1" + cont_line
-
-                    else: # pattern is regex
-                        while True:
-                            cont_line = next_line(file_handle)
-                            if cont_line == None: # end of file
-                                break
-                            elif cont_pattern.search(cont_line[:-1]):
-                                line = line[:-1] + "\1" + cont_line
-                            else:
-                                pushed_back_line = cont_line # sorry for stealing this line
-                                break
-
-                # Replacement
-                for replace in replacements:
-                    line = replace.replace('\\0', line.rstrip()) + "\n"
-                    for nr, group in enumerate(matches.groups()):
-                        line = line.replace('\\%d' % (nr+1), group)
-
-                break # matching rule found and executed
-
-        color = {'C': tty_red, 'W': tty_yellow, 'O': tty_green, 'I': tty_blue, '.': ''}[level]
-        if debug:
-            line = line.replace("\1", "\nCONT:")
-        if level == "I":
-            level = "."
-        if opt_nocontext and level == '.':
-            continue
-        outputtxt += "%s%s %s%s\n" % (color, level, line[:-1], tty_normal)
-
-    new_offset = os.lseek(file_desc, 0, 1) # os.SEEK_CUR not available in Python 2.4
-    status[logfile] = new_offset, inode
-
-    # output all lines if at least one warning, error or ok has been found
-    if worst > -1:
-        sys.stdout.write(outputtxt)
-        sys.stdout.flush()
-
-    # Handle option maxfilesize, regardless of warning or errors that have happened
-    if opt_maxfilesize != None and (offset / opt_maxfilesize) < (new_offset / opt_maxfilesize):
-        sys.stdout.write("%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
-                  (tty_yellow, opt_maxfilesize, new_offset / opt_maxfilesize, tty_normal))
-
-
-try:
-    config = read_config()
-except Exception, e:
-    if debug:
-        raise
-    sys.stdout.write("CANNOT READ CONFIG FILE: %s\n" % e)
-    sys.exit(1)
-
-# Simply ignore errors in the status file.  In case of a corrupted status file we simply begin
-# with an empty status. That keeps the monitoring up and running - even if we might lose a
-# message in the extreme case of a corrupted status file.
-try:
-    status = read_status()
-except Exception, e:
-    status = {}
-
-
-logfile_patterns = {}
-# The filename line may contain options like 'maxlines=100' or 'maxtime=10'
-for filenames, patterns in config:
-    # Initialize options with default values
-    opt_maxlines = None
-    opt_maxtime = None
-    opt_maxlinesize = None
-    opt_maxfilesize = None
-    opt_regex = None
-    opt_overflow = 'C'
-    opt_overflow_level = 2
-    opt_nocontext = False
-    try:
-        options = [ o.split('=', 1) for o in filenames if '=' in o ]
-        for key, value in options:
-            if key == 'maxlines':
-                opt_maxlines = int(value)
-            elif key == 'maxtime':
-                opt_maxtime = float(value)
-            elif key == 'maxlinesize':
-                opt_maxlinesize = int(value)
-            elif key == 'maxfilesize':
-                opt_maxfilesize = int(value)
-            elif key == 'overflow':
-                if value not in [ 'C', 'I', 'W', 'O' ]:
-                    raise Exception("Invalid value %s for overflow. Allowed are C, I, O and W" % value)
-                opt_overflow = value
-                opt_overflow_level = {'C':2, 'W':1, 'O':0, 'I':0}[value]
-            elif key == 'regex':
-                opt_regex = re.compile(value)
-            elif key == 'iregex':
-                opt_regex = re.compile(value, re.I)
-            elif key == 'nocontext':
-                opt_nocontext = True
-            else:
-                raise Exception("Invalid option %s" % key)
-    except Exception, e:
-        if debug:
-            raise
-        sys.stdout.write("INVALID CONFIGURATION: %s\n" % e)
-        sys.exit(1)
-
-
-    for glob_pattern in filenames:
-        if '=' in glob_pattern:
-            continue
-        logfiles = glob.glob(glob_pattern)
-        if opt_regex:
-            logfiles = [ f for f in logfiles if opt_regex.search(f) ]
-        if len(logfiles) == 0:
-            sys.stdout.write('[[[%s:missing]]]\n' % glob_pattern)
-        else:
-            for logfile in logfiles:
-                logfile_patterns[logfile] = logfile_patterns.get(logfile, []) + patterns
-
-for logfile, patterns in logfile_patterns.items():
-    process_logfile(logfile, patterns)
-
-if not debug:
-    save_status(status)
diff --git a/checkmk/checkmk-files/mk_logwatch.py b/checkmk/checkmk-files/mk_logwatch.py
new file mode 100644
index 0000000000000000000000000000000000000000..9282ae0cc67a1160a8364073e163607ff44b3647
--- /dev/null
+++ b/checkmk/checkmk-files/mk_logwatch.py
@@ -0,0 +1,1163 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# Copyright (C) 2019 tribe29 GmbH - License: GNU General Public License v2
+# This file is part of Checkmk (https://checkmk.com). It is subject to the terms and
+# conditions defined in the file COPYING, which is part of this source code package.
+"""mk_logwatch
+This is the Check_MK Agent plugin. If configured it will be called by the
+agent without arguments.
+
+Options:
+    -d               Debug mode: Colored output, no saving of status.
+    -c CONFIG_FILE   Use this config file
+    -h               Show help.
+    --no_state       Do not write the state file.
+    -v               Verbose output for debugging purposes (no debug mode).
+
+You should find an example configuration file at
+'../cfg_examples/logwatch.cfg' relative to this file.
+"""
+
+from __future__ import with_statement
+
+__version__ = "2.0.0p12"
+
+import sys
+if sys.version_info < (2, 6):
+    sys.stderr.write("ERROR: Python 2.5 is not supported. Please use Python 2.6 or newer.\n")
+    sys.exit(1)
+
+import io
+import glob
+import logging
+import os
+import re
+import shutil
+import time
+import socket
+import binascii
+import platform
+import locale
+import ast
+
+import shlex
+
+# On Python 3 under Windows, sys.stdout translates "\n" to "\r\n". Checkmk can't handle
+# this, therefore we replace sys.stdout with a wrapper that always writes "\n" newlines.
+# If you want the old behaviour, use old_stdout.
+if sys.version_info[0] >= 3:
+    new_stdout = io.TextIOWrapper(sys.stdout.buffer,
+                                  newline='\n',
+                                  encoding=sys.stdout.encoding,
+                                  errors=sys.stdout.errors)
+    old_stdout, sys.stdout = sys.stdout, new_stdout
+
+MK_VARDIR = os.getenv("LOGWATCH_DIR") or os.getenv("MK_VARDIR") or os.getenv("MK_STATEDIR") or "."
+
+MK_CONFDIR = os.getenv("LOGWATCH_DIR") or os.getenv("MK_CONFDIR") or "."
+
+LOGGER = logging.getLogger(__name__)
+
+IPV4_REGEX = re.compile(r"^(::ffff:|::ffff:0:|)(?:[0-9]{1,3}\.){3}[0-9]{1,3}$")
+
+IPV6_REGEX = re.compile(r"^(?:[A-F0-9]{1,4}:){7}[A-F0-9]{1,4}$")
+
+ENCODINGS = (
+    (b'\xFF\xFE', "utf_16"),
+    (b'\xFE\xFF', "utf_16_be"),
+)
+
+TTY_COLORS = {
+    'C': '\033[1;31m',  # red
+    'W': '\033[1;33m',  # yellow
+    'O': '\033[1;32m',  # green
+    'I': '\033[1;34m',  # blue
+    '.': '',  # remain same
+    'normal': '\033[0m',
+}
+
+CONFIG_ERROR_PREFIX = "CANNOT READ CONFIG FILE: "  # detected by check plugin
+
+PY2 = sys.version_info[0] == 2
+PY3 = sys.version_info[0] == 3
+
+if PY3:
+    text_type = str
+    binary_type = bytes
+else:
+    text_type = unicode  # pylint: disable=undefined-variable
+    binary_type = str
+
+
+# Borrowed from six
+def ensure_str(s, encoding='utf-8', errors='strict'):
+    """Coerce *s* to `str`.
+
+    For Python 2:
+      - `unicode` -> encoded to `str`
+      - `str` -> `str`
+
+    For Python 3:
+      - `str` -> `str`
+      - `bytes` -> decoded to `str`
+    """
+    if not isinstance(s, (text_type, binary_type)):
+        raise TypeError("not expecting type '%s'" % type(s))
+    if PY2 and isinstance(s, text_type):
+        s = s.encode(encoding, errors)
+    elif PY3 and isinstance(s, binary_type):
+        s = s.decode(encoding, errors)
+    return s
+
+
+def init_logging(verbosity):
+    if verbosity == 0:
+        LOGGER.propagate = False
+        logging.basicConfig(level=logging.ERROR, format="%(levelname)s: %(message)s")
+    elif verbosity == 1:
+        logging.basicConfig(level=logging.INFO, format="%(levelname)s: %(message)s")
+    else:
+        logging.basicConfig(level=logging.DEBUG, format="%(levelname)s: %(lineno)s: %(message)s")
+
+
+class ArgsParser(object):  # pylint: disable=too-few-public-methods, useless-object-inheritance
+    """
+    Custom argument parsing.
+    (We use neither optparse, which is deprecated, nor argparse, which requires
+    Python 2.7 or newer, because this plugin still has to run on Python 2.6.)
+    """
+    def __init__(self, argv):
+        super(ArgsParser, self).__init__()
+
+        if "-h" in argv:
+            sys.stderr.write(ensure_str(__doc__))
+            sys.exit(0)
+
+        self.verbosity = argv.count('-v') + 2 * argv.count('-vv')
+        self.config = argv[argv.index('-c') + 1] if '-c' in argv else None
+        self.debug = '-d' in argv or '--debug' in argv
+        self.no_state = '--no_state' in argv
+
+
+#   .--MEI-Cleanup---------------------------------------------------------.
+#   |     __  __ _____ ___       ____ _                                    |
+#   |    |  \/  | ____|_ _|     / ___| | ___  __ _ _ __  _   _ _ __        |
+#   |    | |\/| |  _|  | |_____| |   | |/ _ \/ _` | '_ \| | | | '_ \       |
+#   |    | |  | | |___ | |_____| |___| |  __/ (_| | | | | |_| | |_) |      |
+#   |    |_|  |_|_____|___|     \____|_|\___|\__,_|_| |_|\__,_| .__/       |
+#   |                                                         |_|          |
+#   +----------------------------------------------------------------------+
+# In case the program crashes or is killed in a hard way, the frozen binary .exe
+# may leave temporary directories named "_MEI..." in the temporary path. Clean them
+# up to prevent eating disk space over time.
+
+
+class MEIFolderCleaner(object):  # pylint: disable=useless-object-inheritance
+    def pid_running(self, pid):
+        import ctypes
+        kernel32 = ctypes.windll.kernel32  # type: ignore[attr-defined]
+        SYNCHRONIZE = 0x100000
+
+        process = kernel32.OpenProcess(SYNCHRONIZE, 0, pid)
+
+        if process != 0:
+            kernel32.CloseHandle(process)
+            return True
+        return False
+
+    def find_and_remove_leftover_folders(self, hint_filenames):
+        if not hasattr(sys, "frozen"):
+            return
+
+        import win32file  # type: ignore[import] # pylint: disable=import-error
+        import tempfile
+        base_path = tempfile.gettempdir()
+        for f in os.listdir(base_path):
+            try:
+                path = os.path.join(base_path, f)
+
+                if not os.path.isdir(path):
+                    continue
+
+                # Only care about directories related to our program
+                invalid_dir = False
+                for hint_filename in hint_filenames:
+                    if not os.path.exists(os.path.join(path, hint_filename)):
+                        invalid_dir = True
+                        break
+                if invalid_dir:
+                    continue
+
+                pyinstaller_tmp_path = win32file.GetLongPathName(
+                    sys._MEIPASS).lower()  # type: ignore[attr-defined]
+                if pyinstaller_tmp_path == path.lower():
+                    continue  # Skip our own directory
+
+                # Extract the process id from the directory and check whether or not it is still
+                # running. Don't delete directories of running processes!
+                # The name of the temporary directories is "_MEI<PID><NR>". We try to extract the PID
+                # by stripping off a single digit from the right, in the hope that NR is a single
+                # digit in all relevant cases.
+                pid = int(f[4:-1])
+                if self.pid_running(pid):
+                    continue
+
+                shutil.rmtree(path)
+            except Exception as e:
+                LOGGER.debug("Finding and removing leftover folders failed: %s", e)
+
+
+def get_status_filename(cluster_config):
+    """
+    Side effects:
+    - Depends on the environment variable REMOTE (or REMOTE_ADDR).
+    - In case the agent plugin is called with verbose/debug options set, it also
+      depends on the global LOGGER and stdout.
+
+    Determine the name of the state file dependent on ENV variable and config:
+    $REMOTE set, no cluster set or no ip match -> logwatch.state.<formatted-REMOTE>
+    $REMOTE set, cluster set and ip match      -> logwatch.state.<cluster-name>
+    $REMOTE not set and a tty                  -> logwatch.state.local
+    $REMOTE not set and not a tty              -> logwatch.state
+
+    $REMOTE is determined by the check_mk_agent and varies dependent on how the
+    check_mk_agent is accessed:
+    - telnet ($REMOTE_HOST): $REMOTE is in IPv6 notation. IPv4 is extended to IPv6
+                             notation e.g. ::ffff:127.0.0.1
+    - ssh ($SSH_CLIENT): $REMOTE is either in IPv4 or IPv6 notation dependent on the
+                         IP family of the remote host.
+
+    <formatted-REMOTE> is $REMOTE with all colons (:) replaced by underscores (_).
+    This applies to IPv6 addresses, to IPv4 addresses extended to IPv6 notation and
+    to values that match neither an IPv4 nor an IPv6 address.
+    """
+    remote = os.getenv("REMOTE", os.getenv("REMOTE_ADDR"))
+    if not remote:
+        status_filename = "logwatch.state" + (".local" if sys.stdout.isatty() else "")
+        return os.path.join(MK_VARDIR, status_filename)
+    remote_hostname = remote.replace(":", "_")
+
+    match = IPV4_REGEX.match(remote) or IPV6_REGEX.match(remote)
+    if not match:
+        LOGGER.debug("REMOTE %r neither IPv4 nor IPv6 address.", remote)
+        return os.path.join(MK_VARDIR, "logwatch.state.%s" % remote_hostname)
+
+    remote_ip = match.group()
+    # in case of IPv4 extended to IPv6 get rid of prefix for ip match lookup
+    if remote_ip.startswith("::ffff:"):
+        remote_ip = remote_ip[7:]
+
+    # In case a cluster is configured, map the remote IP to the cluster name.
+    # The attribute "name" is mandatory and unique per cluster config block.
+    cluster_name = remote_hostname
+    for conf in cluster_config:
+        for ip_or_subnet in conf.ips_or_subnets:
+            if ip_in_subnetwork(remote_ip, ip_or_subnet):
+                # Cluster name may not contain whitespaces (must be provided from
+                # the WATO config as type ID or hostname).
+                cluster_name = conf.name
+                LOGGER.info("Matching cluster ip %s", remote_ip)
+                LOGGER.info("Matching cluster name %s", cluster_name)
+    status_filename = os.path.join(MK_VARDIR, "logwatch.state.%s" % cluster_name)
+    LOGGER.info("Status filename: %s", status_filename)
+    return status_filename
+
+
+def is_comment(line):
+    return line.lstrip().startswith('#')
+
+
+def is_empty(line):
+    return line.strip() == ""
+
+
+def is_indented(line):
+    return line.startswith(" ")
+
+
+def parse_filenames(line):
+    if platform.system() == "Windows":
+        # We can't use pathlib: Python 2.5 has no pathlib.
+        # Normalize the path and double the backslashes so they survive shlex splitting.
+        _processed_line = line.replace('\\', '/')
+        _processed_line = os.path.normpath(_processed_line)
+        _processed_line = _processed_line.replace('\\', '\\\\')
+        return shlex.split(_processed_line)
+
+    if sys.version_info[0] < 3:
+        return [x.decode("utf-8") for x in shlex.split(line.encode("utf-8"))]
+
+    return shlex.split(line)
+
+
+def get_config_files(directory, config_file_arg=None):
+    if config_file_arg is not None:
+        return [config_file_arg]
+
+    config_file_paths = []
+    config_file_paths.append(directory + "/logwatch.cfg")
+    # Add config file paths from a logwatch.d folder
+    for config_file in glob.glob(directory + "/logwatch.d/*.cfg"):
+        config_file_paths.append(config_file)
+    LOGGER.info("Configuration file paths: %r", config_file_paths)
+    return config_file_paths
+
+
+def iter_config_lines(files, debug=False):
+    for file_ in files:
+        try:
+            with open(file_, 'rb') as fid:
+                try:
+                    decoded = (line.decode('utf-8') for line in fid)
+                    for line in decoded:
+                        if not is_comment(line) and not is_empty(line):
+                            yield line.rstrip()
+                except UnicodeDecodeError:
+                    msg = "Error reading file %r (please use utf-8 encoding!)\n" % file_
+                    sys.stdout.write(CONFIG_ERROR_PREFIX + msg)
+        except IOError:
+            if debug:
+                raise
+
+
+def consume_cluster_definition(config_lines):
+    cluster_name = config_lines.pop(0)[8:].strip()  # e.g.: CLUSTER duck
+    cluster = ClusterConfigBlock(cluster_name, [])
+    LOGGER.debug("new ClusterConfigBlock: %s", cluster_name)
+
+    while config_lines and is_indented(config_lines[0]):
+        cluster.ips_or_subnets.append(config_lines.pop(0).strip())
+
+    return cluster
+
+
+def consume_logfile_definition(config_lines):
+    cont_list = []
+    rewrite_list = []
+    filenames = parse_filenames(config_lines.pop(0))
+    logfiles = PatternConfigBlock(filenames, [])
+    LOGGER.debug("new PatternConfigBlock: %s", filenames)
+
+    while config_lines and is_indented(config_lines[0]):
+        line = config_lines.pop(0)
+        level, raw_pattern = line.split(None, 1)
+
+        if level == 'A':
+            cont_list.append(raw_pattern)
+
+        elif level == 'R':
+            rewrite_list.append(raw_pattern)
+
+        elif level in ('C', 'W', 'I', 'O'):
+            # New pattern for line matching => clear continuation and rewrite patterns
+            cont_list = []
+            rewrite_list = []
+            pattern = (level, raw_pattern, cont_list, rewrite_list)
+            logfiles.patterns.append(pattern)
+            LOGGER.debug("pattern %s", pattern)
+
+        else:
+            raise ValueError("Invalid level in pattern line %r" % line)
+
+    return logfiles
+
+
+def read_config(files, debug=False):
+    """
+    Read logwatch.cfg (patterns, cluster mapping, etc.).
+
+    Side effect: Reads filesystem files logwatch.cfg and /logwatch.d/*.cfg
+
+    Returns a tuple of two lists: PatternConfigBlock(files, patterns) objects describing
+    the logfile patterns, and ClusterConfigBlock(name, ips_or_subnets) objects describing
+    the optional cluster mapping, with ips_or_subnets as a list of strings.
+    """
+    LOGGER.debug("Config files: %r", files)
+
+    logfiles_configs = []
+    cluster_configs = []
+    config_lines = list(iter_config_lines(files, debug=debug))
+
+    # Parsing has to consider the following possible lines:
+    # - comment lines (begin with #, already dropped by iter_config_lines)
+    # - logfile lines (not indented, not empty, do not start with CLUSTER)
+    # - cluster lines (begin with CLUSTER)
+    # - logfile patterns (follow logfile lines, begin with whitespace)
+    # - cluster ips or subnets (follow cluster lines, begin with whitespace)
+    # The end of the file also terminates the pattern/ip list of the current block.
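+    # A minimal example of the syntax being parsed here (paths, patterns and the
+    # cluster data are hypothetical, for illustration only):
+    #
+    #   /var/log/messages /var/log/syslog* maxlines=1000 overflow=W
+    #    C Fatal
+    #    W Warning
+    #   CLUSTER my-cluster
+    #    192.168.10.0/24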
+
+    while config_lines:
+        first_line = config_lines[0]
+        if is_indented(first_line):
+            raise ValueError("Missing block definition for line %r" % first_line)
+
+        if first_line.startswith("CLUSTER "):
+            cluster_configs.append(consume_cluster_definition(config_lines))
+        else:
+            logfiles_configs.append(consume_logfile_definition(config_lines))
+
+    LOGGER.info("Logfiles configurations: %r", logfiles_configs)
+    LOGGER.info("Optional cluster configurations: %r", cluster_configs)
+    return logfiles_configs, cluster_configs
+
+
+class State(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, filename, data=None):
+        super(State, self).__init__()
+        self.filename = filename
+        self._data = data or {}
+
+    @staticmethod
+    def _load_line(line):
+        try:
+            return ast.literal_eval(line)
+        except (NameError, SyntaxError, ValueError):
+            # Support status files with the following structure:
+            # /var/log/messages|7767698|32455445
+            # These were used prior to 1.7.0i1
+            parts = line.split('|')
+            filename, offset = parts[0], int(parts[1])
+            inode = int(parts[2]) if len(parts) >= 3 else -1
+            return {'file': filename, 'offset': offset, 'inode': inode}
+
+    def read(self):
+        """Read state from file
+        Support state files with the following structure:
+        {'file': b'/var/log/messages', 'offset': 7767698, 'inode': 32455445}
+        """
+        LOGGER.debug("Reading state file: %r", self.filename)
+
+        with open(self.filename) as stat_fh:
+            for line in stat_fh:
+                line_data = self._load_line(line)
+                self._data[line_data['file']] = line_data
+
+        LOGGER.info("Read state: %r", self._data)
+        return self
+
+    def write(self):
+        LOGGER.debug("Writing state: %r", self._data)
+        LOGGER.debug("State filename: %r", self.filename)
+
+        with open(self.filename, "wb") as stat_fh:
+            for data in self._data.values():
+                stat_fh.write(repr(data).encode("ascii") + b"\n")
+
+    def get(self, key):
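+        # Note: setdefault also registers the file in the state, so the entry will be
+        # persisted on the next write().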
+        return self._data.setdefault(key, {'file': key})
+
+
+class LogLinesIter(object):  # pylint: disable=useless-object-inheritance
+    # this is supposed to become a proper iterator.
+    # for now, we need a persistent buffer to fix things
+    BLOCKSIZE = 8192
+
+    def __init__(self, logfile, encoding):
+        super(LogLinesIter, self).__init__()
+        self._fd = os.open(logfile, os.O_RDONLY)
+        self._lines = []  # List[Text]
+        self._buffer = b''
+        self._reached_end = False  # used for optimization only
+        self._enc = encoding or self._get_encoding()
+        self._nl = u'\n'
+        # Windows needs some special processing; it is difficult to fit this smoothly
+        # into the current architecture.
+        self._utf16 = self._enc == "utf_16"
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        self.close()
+        return False  # Do not swallow exceptions
+
+    def close(self):
+        os.close(self._fd)
+
+    def _get_encoding(self):
+        # In 1.5 this was only used when logwatch is executed on windows.
+        # On linux the log lines were not decoded at all.
+        #
+        # For 1.6 we want to follow the standard approach to decode things read
+        # from external sources as soon as possible. We also want to ensure that
+        # the output of this script is always UTF-8 encoded later.
+        #
+        # In case the current approach does not work out, then have a look here
+        # for possible more robust solutions:
+        # http://python-notes.curiousefficiency.org/en/latest/python3/text_file_processing.html
+        enc_bytes_len = max(len(bom) for bom, _enc in ENCODINGS)
+        self._buffer = os.read(self._fd, enc_bytes_len)
+        for bom, encoding in ENCODINGS:
+            if self._buffer.startswith(bom):
+                self._buffer = self._buffer[len(bom):]
+                LOGGER.debug("Detected %r encoding by BOM", encoding)
+                return encoding
+
+        pref_encoding = locale.getpreferredencoding()
+        encoding = "utf_8" if not pref_encoding or pref_encoding == "ANSI_X3.4-1968" else pref_encoding
+        LOGGER.debug("Locale Preferred encoding is %s, using %s", pref_encoding, encoding)
+        return encoding
+
+    def _update_lines(self):
+        """
+        Try to read more lines from file.
+        """
+        binary_nl = self._nl.encode(self._enc)
+        while binary_nl not in self._buffer:
+            new_bytes = os.read(self._fd, LogLinesIter.BLOCKSIZE)
+            if not new_bytes:
+                break
+            self._buffer += new_bytes
+
+        # in case of decoding error, replace with U+FFFD REPLACEMENT CHARACTER
+        raw_lines = self._buffer.decode(self._enc, "replace").split(self._nl)
+        self._buffer = raw_lines.pop().encode(self._enc)  # unfinished line
+        self._lines.extend(l + self._nl for l in raw_lines)
+
+    def set_position(self, position):
+        if position is None:
+            return
+        self._buffer = b''
+        self._lines = []
+        os.lseek(self._fd, position, os.SEEK_SET)
+
+    def get_position(self):
+        """
+        Return the position where we want to continue next time
+        """
+        pointer_pos = os.lseek(self._fd, 0, os.SEEK_CUR)
+        bytes_unused = sum((len(l.encode(self._enc)) for l in self._lines), len(self._buffer))
+        return pointer_pos - bytes_unused
+
+    def skip_remaining(self):
+        os.lseek(self._fd, 0, os.SEEK_END)
+        self._buffer = b''
+        self._lines = []
+
+    def push_back_line(self, line):
+        self._lines.insert(0, line)
+
+    def next_line(self):
+        if self._reached_end:  # optimization only
+            return None
+
+        if not self._lines:
+            self._update_lines()
+
+        if self._lines:
+            return self._lines.pop(0)
+
+        self._reached_end = True
+        return None
+
+
+def is_inode_capable(path):
+    system = platform.system()
+    if system == "Windows":
+        volume_name = "%s:\\\\" % path.split(":", 1)[0]
+        import win32api  # type: ignore[import] # pylint: disable=import-error
+        volume_info = win32api.GetVolumeInformation(volume_name)
+        volume_type = volume_info[-1]
+        return "ntfs" in volume_type.lower()
+    return system == "Linux"
+
+
+def process_logfile(section, filestate, debug):
+    """
+    Returns a tuple of (
+        section header for this logfile,
+        list of output lines (warnings, errors and their context),
+    ).
+    The list is empty if the file has never been seen before, if nothing relevant
+    was found, or if the logfile cannot be opened (in which case the header carries
+    the ":cannotopen" marker).
+    """
+    # TODO: Make use of the ContextManager feature of LogLinesIter
+    try:
+        log_iter = LogLinesIter(section.name_fs, section.options.encoding)
+    except OSError:
+        if debug:
+            raise
+        return u"[[[%s:cannotopen]]]\n" % section.name_write, []
+
+    try:
+        header = u"[[[%s]]]\n" % section.name_write
+
+        stat = os.stat(section.name_fs)
+        inode = stat.st_ino if is_inode_capable(section.name_fs) else 1
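+        # On filesystems without usable inodes the constant 1 is stored, which in
+        # effect disables rotation detection via inode change for this file.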
+        # If we have never seen this file before, we set the inode to -1
+        prev_inode = filestate.get('inode', -1)
+        filestate['inode'] = inode
+
+        # Look at which file offset we have finished scanning the logfile last time.
+        offset = filestate.get('offset')
+        # Set the current pointer to the file end
+        filestate['offset'] = stat.st_size
+
+        # If we have never seen this file before, we do not want
+        # to make a fuss about ancient log messages... (unless configured to)
+        if offset is None and not (section.options.fromstart or debug):
+            return header, []
+
+        # If the inode of the logfile has changed it has apparently
+        # been started anew (logfile rotation). At least we must
+        # assume that. In some rare cases (restore of a backup, etc.)
+        # we are wrong and resend old log messages.
+        if prev_inode >= 0 and inode != prev_inode:
+            offset = None
+
+        # Our previously stored offset is the current end ->
+        # no new lines in this file
+        if offset == stat.st_size:
+            return header, []
+
+        # If our offset is beyond the current end, the logfile has been
+        # truncated or wrapped while keeping the same inode. We assume
+        # that it contains all new data in that case and restart from
+        # beginning.
+        if offset is not None and offset > stat.st_size:
+            offset = None
+
+        # now seek to offset where interesting data begins
+        log_iter.set_position(offset)
+
+        worst = -1
+        warnings_and_errors = []
+        lines_parsed = 0
+        start_time = time.time()
+
+        while True:
+            line = log_iter.next_line()
+            if line is None:
+                break  # End of file
+
+            # Handle option maxlinesize
+            if section.options.maxlinesize is not None and len(line) > section.options.maxlinesize:
+                line = line[:section.options.maxlinesize] + u"[TRUNCATED]\n"
+
+            lines_parsed += 1
+            # Check if maximum number of new log messages is exceeded
+            if section.options.maxlines is not None and lines_parsed > section.options.maxlines:
+                warnings_and_errors.append(
+                    u"%s Maximum number (%d) of new log messages exceeded.\n" % (
+                        section.options.overflow,
+                        section.options.maxlines,
+                    ))
+                worst = max(worst, section.options.overflow_level)
+                log_iter.skip_remaining()
+                break
+
+            # Check if maximum processing time (per file) is exceeded. Check only
+            # every 100'th line in order to save system calls
+            if section.options.maxtime is not None and lines_parsed % 100 == 10 \
+                    and time.time() - start_time > section.options.maxtime:
+                warnings_and_errors.append(
+                    u"%s Maximum parsing time (%.1f sec) of this log file exceeded.\n" % (
+                        section.options.overflow,
+                        section.options.maxtime,
+                    ))
+                worst = max(worst, section.options.overflow_level)
+                log_iter.skip_remaining()
+                break
+
+            level = "."
+            for lev, pattern, cont_patterns, replacements in section.compiled_patterns:
+
+                matches = pattern.search(line[:-1])
+                if matches:
+                    level = lev
+                    levelint = {'C': 2, 'W': 1, 'O': 0, 'I': -1, '.': -1}[lev]
+                    worst = max(levelint, worst)
+
+                    # TODO: the following for block should be a method of the iterator
+                    # Check for continuation lines
+                    for cont_pattern in cont_patterns:
+                        if isinstance(cont_pattern, int):  # add that many lines
+                            for _unused_x in range(cont_pattern):
+                                cont_line = log_iter.next_line()
+                                if cont_line is None:  # end of file
+                                    break
+                                line = line[:-1] + "\1" + cont_line
+
+                        else:  # pattern is regex
+                            while True:
+                                cont_line = log_iter.next_line()
+                                if cont_line is None:  # end of file
+                                    break
+                                if cont_pattern.search(cont_line[:-1]):
+                                    line = line[:-1] + "\1" + cont_line
+                                else:
+                                    log_iter.push_back_line(
+                                        cont_line)  # sorry for stealing this line
+                                    break
+
+                    # Replacement
+                    for replace in replacements:
+                        line = replace.replace('\\0', line.rstrip()) + "\n"
+                        for num, group in enumerate(matches.groups()):
+                            if group is not None:
+                                line = line.replace('\\%d' % (num + 1), group)
+
+                    break  # matching rule found and executed
+
+            if level == "I":
+                level = "."
+            if section.options.nocontext and level == '.':
+                continue
+
+            out_line = "%s %s" % (level, line[:-1])
+            if sys.stdout.isatty():
+                out_line = "%s%s%s" % (TTY_COLORS[level], out_line.replace(
+                    "\1", "\nCONT:"), TTY_COLORS['normal'])
+            warnings_and_errors.append("%s\n" % out_line)
+
+        new_offset = log_iter.get_position()
+    finally:
+        log_iter.close()
+
+    filestate['offset'] = new_offset
+
+    # Handle option maxfilesize, regardless of warning or errors that have happened
+    if section.options.maxfilesize:
+        offset_wrap = (new_offset // section.options.maxfilesize)
+        if ((offset or 0) // section.options.maxfilesize) < offset_wrap:
+            warnings_and_errors.append(
+                u"%sW Maximum allowed logfile size (%d bytes) exceeded for the %dth time.%s\n" %
+                (TTY_COLORS['W'] if sys.stdout.isatty() else '', section.options.maxfilesize,
+                 offset_wrap, TTY_COLORS['normal'] if sys.stdout.isatty() else ''))
+
+    # output all lines if at least one warning, error or ok has been found
+    if worst > -1:
+        return header, warnings_and_errors
+    return header, []
+
+
+class Options(object):  # pylint: disable=useless-object-inheritance
+    """Options w.r.t. logfile patterns (not w.r.t. cluster mapping)."""
+    MAP_OVERFLOW = {'C': 2, 'W': 1, 'I': 0, 'O': 0}
+    MAP_BOOL = {'true': True, 'false': False, '1': True, '0': False, 'yes': True, 'no': False}
+    DEFAULTS = {
+        'encoding': None,
+        'maxfilesize': None,
+        'maxlines': None,
+        'maxtime': None,
+        'maxlinesize': None,
+        'regex': None,
+        'overflow': 'C',
+        'nocontext': None,
+        'maxcontextlines': None,
+        'maxoutputsize': 500000,  # same as logwatch_max_filesize in check plugin
+        'fromstart': False,
+    }
+
+    def __init__(self):
+        self.values = {}
+
+    @property
+    def encoding(self):
+        return self._attr_or_default('encoding')
+
+    @property
+    def maxfilesize(self):
+        return self._attr_or_default('maxfilesize')
+
+    @property
+    def maxlines(self):
+        return self._attr_or_default('maxlines')
+
+    @property
+    def maxtime(self):
+        return self._attr_or_default('maxtime')
+
+    @property
+    def maxlinesize(self):
+        return self._attr_or_default('maxlinesize')
+
+    @property
+    def regex(self):
+        return self._attr_or_default('regex')
+
+    @property
+    def overflow(self):
+        return self._attr_or_default('overflow')
+
+    @property
+    def nocontext(self):
+        return self._attr_or_default('nocontext')
+
+    @property
+    def maxcontextlines(self):
+        return self._attr_or_default('maxcontextlines')
+
+    @property
+    def maxoutputsize(self):
+        return self._attr_or_default('maxoutputsize')
+
+    @property
+    def fromstart(self):
+        return self._attr_or_default('fromstart')
+
+    def _attr_or_default(self, key):
+        if key in self.values:
+            return self.values[key]
+        return Options.DEFAULTS[key]
+
+    @property
+    def overflow_level(self):
+        return self.MAP_OVERFLOW[self.overflow]
+
+    def update(self, other):
+        self.values.update(other.values)
+
+    def set_opt(self, opt_str):
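+        # opt_str is a single "key=value" token from a logfile line of the config,
+        # e.g. "maxlines=100", "overflow=W", "iregex=error", "nocontext=1" or
+        # "maxcontextlines=5,2" (values are examples only).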
+        try:
+            key, value = opt_str.split('=', 1)
+            if key == 'encoding':
+                ''.encode(value)  # make sure it's an encoding
+                self.values[key] = value
+            elif key in ('maxlines', 'maxlinesize', 'maxfilesize', 'maxoutputsize'):
+                self.values[key] = int(value)
+            elif key in ('maxtime',):
+                self.values[key] = float(value)
+            elif key == 'overflow':
+                if value not in Options.MAP_OVERFLOW.keys():
+                    raise ValueError("Invalid overflow: %r (choose from %r)" % (
+                        value,
+                        Options.MAP_OVERFLOW.keys(),  # pylint: disable=dict-keys-not-iterating
+                    ))
+                self.values['overflow'] = value
+            elif key in ('regex', 'iregex'):
+                flags = (re.IGNORECASE if key.startswith('i') else 0) | re.UNICODE
+                self.values['regex'] = re.compile(value, flags)
+            elif key in ('nocontext', 'fromstart'):
+                if value.lower() not in Options.MAP_BOOL.keys():
+                    raise ValueError("Invalid %s: %r (choose from %r)" % (
+                        key,
+                        value,
+                        Options.MAP_BOOL.keys(),  # pylint: disable=dict-keys-not-iterating
+                    ))
+                self.values[key] = Options.MAP_BOOL[value.lower()]
+            elif key == 'maxcontextlines':
+                before, after = (int(i) for i in value.split(','))
+                self.values[key] = (before, after)
+            else:
+                raise ValueError("Invalid option: %r" % opt_str)
+        except (ValueError, LookupError) as exc:
+            sys.stdout.write("INVALID CONFIGURATION: %s\n" % exc)
+            raise
+
+
+class PatternConfigBlock(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, files, patterns):
+        super(PatternConfigBlock, self).__init__()
+        self.files = files
+        self.patterns = patterns
+
+
+class ClusterConfigBlock(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, name, ips_or_subnets):
+        super(ClusterConfigBlock, self).__init__()
+        self.name = name
+        self.ips_or_subnets = ips_or_subnets
+
+
+def _decode_to_unicode(match):
+    # (Union[bytes, unicode, str]) -> unicode
+    # we can't use 'surrogateescape' because that a) is py3 only b) would fail upon re-encoding
+    # we can't use 'six': this code may be executed using Python 2.5/2.6
+    if sys.version_info[0] == 2:
+        # Python 2: str @Windows && @Linux
+        return match if isinstance(match, unicode) else match.decode('utf8', 'replace')  # pylint: disable=undefined-variable
+
+    # Python 3: bytes @Linux and unicode @Windows
+    return match.decode('utf-8', 'replace') if isinstance(match, bytes) else match
+
+
+def find_matching_logfiles(glob_pattern):
+    """
+    Evaluate globbing pattern to a list of logfile IDs
+
+    Return a list of Tuples:
+     * one identifier for opening the file as used by os.open (byte str or unicode)
+     * one unicode str, safe for writing
+
+    Glob matching of hard linked, unbroken soft linked/symlinked files.
+    No tilde expansion is done, but *, ?, and character ranges expressed with []
+    will be correctly matched.
+
+    No support for recursive globs ** (only supported from Python 3.5 on).
+
+    Hard linked duplicates of files are not filtered.
+    Soft links may not be detected properly depending on the Python runtime
+    [Python Standard Lib, os.path.islink()].
+    """
+    if platform.system() == "Windows":
+        # windows is the easy case:
+        # provide unicode, and let python deal with the rest
+        # (see https://www.python.org/dev/peps/pep-0277)
+        matches = list(glob.glob(glob_pattern))
+    else:
+        # we can't use glob on unicode, as it would try to re-decode matches with ascii
+        matches = glob.glob(glob_pattern.encode('utf8'))
+
+    # skip dirs
+    file_refs = []
+    for match in matches:
+        if os.path.isdir(match):
+            continue
+
+        # match is bytes in Linux and unicode/str in Windows
+        match_readable = _decode_to_unicode(match)
+
+        file_refs.append((match, match_readable))
+
+    return file_refs
+
+
+def _search_optimize_raw_pattern(raw_pattern):
+    """return potentially stripped pattern for use with *search*
+
+    Stripping leading and trailing '.*' avoids catastrophic backtracking
+    when long log lines are being processed
+    """
+    start_idx = 2 if raw_pattern.startswith('.*') else 0
+    end_idx = -2 if raw_pattern.endswith('.*') else None
+    return raw_pattern[start_idx:end_idx] or raw_pattern
+
+
+def _compile_continuation_pattern(raw_pattern):
+    try:
+        return int(raw_pattern)
+    except (ValueError, TypeError):
+        return re.compile(_search_optimize_raw_pattern(raw_pattern), re.UNICODE)
+
+
+class LogfileSection(object):  # pylint: disable=useless-object-inheritance
+    def __init__(self, logfile_ref):
+        super(LogfileSection, self).__init__()
+        self.name_fs = logfile_ref[0]
+        self.name_write = logfile_ref[1]
+        self.options = Options()
+        self.patterns = []
+        self._compiled_patterns = None
+
+    @property
+    def compiled_patterns(self):
+        if self._compiled_patterns is not None:
+            return self._compiled_patterns
+
+        compiled_patterns = []
+        for level, raw_pattern, cont_list, rewrite_list in self.patterns:
+            if not rewrite_list:
+                # it does not matter what the matched group is in this case
+                raw_pattern = _search_optimize_raw_pattern(raw_pattern)
+            compiled = re.compile(raw_pattern, re.UNICODE)
+            cont_list = [_compile_continuation_pattern(cp) for cp in cont_list]
+            compiled_patterns.append((level, compiled, cont_list, rewrite_list))
+
+        self._compiled_patterns = compiled_patterns
+        return self._compiled_patterns
+
+
+def parse_sections(logfiles_config):
+    """
+    Returns a list of LogfileSections and a list of non-matching patterns.
+    """
+    found_sections = {}  # type: dict
+    non_matching_patterns = []
+
+    for cfg in logfiles_config:
+
+        # First read all the options like 'maxlines=100' or 'maxtime=10'
+        opt = Options()
+        for item in cfg.files:
+            if '=' in item:
+                opt.set_opt(item)
+
+        # Then handle the file patterns
+        # The thing here is that the same file could match different patterns.
+        for glob_pattern in (f for f in cfg.files if '=' not in f):
+            logfile_refs = find_matching_logfiles(glob_pattern)
+            if opt.regex is not None:
+                logfile_refs = [ref for ref in logfile_refs if opt.regex.search(ref[1])]
+            if not logfile_refs:
+                non_matching_patterns.append(glob_pattern)
+            for logfile_ref in logfile_refs:
+                section = found_sections.setdefault(logfile_ref[0], LogfileSection(logfile_ref))
+                section.patterns.extend(cfg.patterns)
+                section.options.update(opt)
+
+    logfile_sections = [found_sections[k] for k in sorted(found_sections)]
+
+    return logfile_sections, non_matching_patterns
+
+
+def ip_in_subnetwork(ip_address, subnetwork):
+    """
+    Accepts an IP address as string, e.g. "10.80.1.1", and CIDR notation as string, e.g. "10.80.1.0/24".
+    Returns False in case of incompatible IP versions.
+
+    Implementation depends on Python2 and Python3 standard lib only.
+    """
+    (ip_integer, version1) = _ip_to_integer(ip_address)
+    (ip_lower, ip_upper, version2) = _subnetwork_to_ip_range(subnetwork)
+    if version1 != version2:
+        return False
+    return ip_lower <= ip_integer <= ip_upper
+
+
+def _ip_to_integer(ip_address):
+    """
+    Raises ValueError in case of invalid IP address.
+    """
+    # try parsing the IP address first as IPv4, then as IPv6
+    for version in (socket.AF_INET, socket.AF_INET6):
+        try:
+            ip_hex = socket.inet_pton(version, ip_address)
+        except socket.error:
+            continue
+        ip_integer = int(binascii.hexlify(ip_hex), 16)
+        return (ip_integer, 4 if version == socket.AF_INET else 6)
+    raise ValueError("invalid IP address: %r" % ip_address)
+
+
+def _subnetwork_to_ip_range(subnetwork):
+    """
+    Convert subnetwork to a range of IP addresses
+
+    Raises ValueError in case of invalid subnetwork.
+    """
+    if '/' not in subnetwork:
+        ip_integer, version = _ip_to_integer(subnetwork)
+        return ip_integer, ip_integer, version
+    network_prefix, netmask_len = subnetwork.split('/', 1)
+    # try parsing the subnetwork first as IPv4, then as IPv6
+    for version, ip_len in ((socket.AF_INET, 32), (socket.AF_INET6, 128)):
+        try:
+            ip_hex = socket.inet_pton(version, network_prefix)
+        except socket.error:
+            continue
+        try:
+            suffix_mask = (1 << (ip_len - int(netmask_len))) - 1
+        except ValueError:  # netmask_len is too large or invalid
+            raise ValueError("invalid subnetwork: %r" % subnetwork)
+        netmask = ((1 << ip_len) - 1) - suffix_mask
+        ip_lower = int(binascii.hexlify(ip_hex), 16) & netmask
+        ip_upper = ip_lower + suffix_mask
+        return (ip_lower, ip_upper, 4 if version == socket.AF_INET else 6)
+    raise ValueError("invalid subnetwork: %r" % subnetwork)
+
+
+def _filter_maxoutputsize(lines, maxoutputsize):
+    """Produce lines right *before* maxoutputsize is exceeded"""
+    bytecount = 0
+    for line in lines:
+        bytecount += len(line.encode('utf-8'))
+        if bytecount > maxoutputsize:
+            break
+        yield line
+
+
+def _filter_maxcontextlines(lines_list, before, after):
+    """Only produce lines from a limited context
+
+    Think of grep's -A and -B options
+    """
+
+    n_lines = len(lines_list)
+    indices = iter(range(-before, n_lines))
+    context_end = -1
+    for idx in indices:
+        new_in_context_idx = idx + before
+        if new_in_context_idx < n_lines and context_end < n_lines:
+            new_in_context = lines_list[new_in_context_idx]
+            # if the line ahead is relevant, extend the context
+            if new_in_context.startswith(("C", "W")):
+                context_end = new_in_context_idx + after
+        if 0 <= idx <= context_end:
+            yield lines_list[idx]
+
+
+def write_output(header, lines, options):
+
+    if options.maxcontextlines:
+        lines = _filter_maxcontextlines(lines, *options.maxcontextlines)
+
+    lines = _filter_maxoutputsize(lines, options.maxoutputsize)
+
+    sys.stdout.write(header)
+    sys.stdout.writelines(lines)
+
+
+def main(argv=None):
+    if argv is None:
+        argv = sys.argv
+
+    args = ArgsParser(argv)
+    init_logging(args.verbosity)
+
+    sys.stdout.write("<<<logwatch>>>\n")
+
+    try:
+        # This removes leftover folders which may be generated by crashing frozen binaries
+        folder_cleaner = MEIFolderCleaner()
+        folder_cleaner.find_and_remove_leftover_folders(hint_filenames=["mk_logwatch.exe.manifest"])
+    except Exception as exc:
+        sys.stdout.write("ERROR WHILE DOING FOLDER: %s\n" % exc)
+        sys.exit(1)
+
+    try:
+        files = get_config_files(MK_CONFDIR, config_file_arg=args.config)
+        logfiles_config, cluster_config = read_config(files, args.debug)
+    except Exception as exc:
+        if args.debug:
+            raise
+        sys.stdout.write(CONFIG_ERROR_PREFIX + "%s\n" % exc)
+        sys.exit(1)
+
+    status_filename = get_status_filename(cluster_config)
+    # Copy the last known state from the logwatch.state when there is no status_filename yet.
+    if not os.path.exists(status_filename) and os.path.exists("%s/logwatch.state" % MK_VARDIR):
+        shutil.copy("%s/logwatch.state" % MK_VARDIR, status_filename)
+
+    found_sections, non_matching_patterns = parse_sections(logfiles_config)
+
+    for pattern in non_matching_patterns:
+        # Python 2.5/2.6 compatible solution
+        if sys.version_info[0] == 3:
+            sys.stdout.write("[[[%s:missing]]]\n" % pattern)
+        else:
+            sys.stdout.write((u"[[[%s:missing]]]\n" % pattern).encode('utf-8'))
+
+    state = State(status_filename)
+    try:
+        state.read()
+    except Exception as exc:
+        if args.debug:
+            raise
+        # Simply ignore errors in the status file.  In case of a corrupted status file we simply
+        # begin with an empty status. That keeps the monitoring up and running - even if we might
+        # lose a message in the extreme case of a corrupted status file.
+        LOGGER.warning("Exception reading status file: %s", str(exc))
+
+    for section in found_sections:
+        filestate = state.get(section.name_fs)
+        try:
+            header, output = process_logfile(section, filestate, args.debug)
+            write_output(header, output, section.options)
+        except Exception as exc:
+            if args.debug:
+                raise
+            LOGGER.debug("Exception when processing %r: %s", section.name_fs, exc)
+
+    if args.debug:
+        LOGGER.debug("State file not written (debug mode)")
+        return
+    if not args.no_state:
+        state.write()
+
+
+if __name__ == "__main__":
+    main()
diff --git a/checkmk/debian/logwatch/init.sls b/checkmk/debian/logwatch/init.sls
index 45b5fdb88b0fd1a630f37bfa126c5c1dfe5d66f0..a412e5a497abb938abd60010122aa3a4bd0d7baf 100644
--- a/checkmk/debian/logwatch/init.sls
+++ b/checkmk/debian/logwatch/init.sls
@@ -1,7 +1,7 @@
 logwatch_plugin:
   file.managed:
     - name: /usr/lib/checkmk_agent/plugins/logwatch
-    - source: salt://checkmk-files/logwatch
+    - source: salt://checkmk-files/mk_logwatch.py
     - mode: 755
     - user: root
     - group: root