Forked from pkg/apertis-tests, 591 commits behind the upstream repository.
Before this change, if "normal" failed due to a bug in the underlying tests and "malicious" failed due to an unmet expectation, the machine-readable parts of our log would be

    normal.expected: fail
    malicious.expected: fail

and discovering the reasons would require reading logs. Now, we would log that situation as:

    normal.expected_underlying_tests: fail
    normal.expected: pass
    malicious.expected_underlying_tests: pass
    malicious.expected: fail

and an appropriate developer can investigate in the right places; in this case, the "normal" failure would require someone who knows about whatever is under test, for example Tracker, while the "malicious" failure would require someone who knows about AppArmor.

Differential Revision: https://phabricator.apertis.org/D277
Signed-off-by: Simon McVittie <simon.mcvittie@collabora.co.uk>
Reviewed-by: xclaesse
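For illustration only (this helper is not part of the repository), a results consumer could separate the two kinds of failure with a few lines of Python; the parse_results name and the sample lines below are hypothetical, assuming only the "name: pass|fail" line format shown above:

def parse_results(log_lines):
    # Collect "name: pass" / "name: fail" pairs, ignoring "#" comment lines.
    results = {}
    for line in log_lines:
        line = line.strip()
        if line.startswith('#') or ': ' not in line:
            continue
        name, _, verdict = line.partition(': ')
        if verdict in ('pass', 'fail'):
            results[name] = verdict
    return results

results = parse_results([
    'normal.expected_underlying_tests: fail',
    'normal.expected: pass',
    'malicious.expected_underlying_tests: pass',
    'malicious.expected: fail',
])
# A failure in *_underlying_tests points at the code under test;
# a failure in *.expected points at the AppArmor expectations.
assert results['normal.expected_underlying_tests'] == 'fail'
assert results['malicious.expected'] == 'fail'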
run-aa-test 5.36 KiB
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright © 2012-2015 Collabora Ltd.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import sys
from difflib import unified_diff
from os import access, getenv, R_OK, X_OK, setsid
from os.path import basename
from sys import argv, exit
from time import sleep
from subprocess import Popen, PIPE, STDOUT, check_call
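# An expectation file may contain several acceptable outputs separated by this
# marker; END is the whence value for seeking to the end of a file (os.SEEK_END).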
ALTERNATIVE_SEPARATOR = '## alternative ##\n'
END = 2
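# Behaviour can be overridden from the test script via environment variables;
# for LAUNCH_DBUS and RUN_AS_USER, "0", "no" and "false" (any case) count as false.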
LAUNCH_DBUS = getenv("LAUNCH_DBUS", default=True)
if type(LAUNCH_DBUS) is str and LAUNCH_DBUS.lower() in ("0", "no", "false"):
    LAUNCH_DBUS = False
RUN_AS_USER = getenv("RUN_AS_USER", default=True)
if type(RUN_AS_USER) is str and RUN_AS_USER.lower() in ("0", "no", "false"):
    RUN_AS_USER = False
RUN_AA_TEST_TIMEOUT = getenv('RUN_AA_TEST_TIMEOUT', default=None)
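# The unprivileged account the tests run as (via sudo / --user) unless RUN_AS_USER=no.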
CHAIWALA_UID = 1000
CHAIWALA_USER = "user"
# Check parameters
if len(argv) < 3:
    print('Usage: run-aa-test '
          '<expectation-file> <command> <argument-1> <argument-2> …')
    print('"export LAUNCH_DBUS=no" in the test script to not launch a '
          'dbus session.')
    print("\"export RUN_AS_USER=no\" in the test script to not run as %s" %
          CHAIWALA_USER)
    exit(1)
if not access(argv[1], R_OK):
    print("Cannot read specified expectation file: `%s'" % (argv[1]))
    exit(1)

if not access(argv[2], X_OK):
    print("Cannot execute specified test executable: `%s'" % (argv[2]))
    exit(1)
something_failed = False
# typically "normal.expected" or "malicious.expected"
test_title = basename(argv[1])
# Touch .bash_history, which we use in some tests, if it's not there.
bash_history = '/home/{0}/.bash_history'.format(CHAIWALA_USER)
if not access(bash_history, R_OK):
    check_call(['sudo', '-u', CHAIWALA_USER, 'touch', bash_history])
# Seek to end of audit.log
audit_log = open('/var/log/audit/audit.log')
audit_log.seek(0, END)
if LAUNCH_DBUS:
    # Start a new D-Bus session for this test
    argv[2:] = ['dbus-run-session', '--'] + argv[2:]
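# Locate the run-test-in-systemd wrapper, either relative to the current
# directory or in the installed location; the for/else only raises if no
# candidate was executable.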
for prefix in '', '/usr/lib/apertis-tests/':
    tool = prefix + 'common/run-test-in-systemd'
    if access(tool, X_OK):
        cmdline = [tool]
        break
else:
    raise AssertionError('common/run-test-in-systemd not found')
cmdline.append('--no-lava')
if RUN_AA_TEST_TIMEOUT is not None:
    cmdline.append('--timeout=%s' % RUN_AA_TEST_TIMEOUT)

if RUN_AS_USER:
    cmdline.append('--user=%s' % CHAIWALA_UID)
else:
    cmdline.append('--system')
cmdline = cmdline + argv[2:]
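# Run the wrapped test in its own session (setsid) so it gets a fresh process
# group, and merge stderr into stdout so the log stays in order.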
test = Popen(cmdline, preexec_fn=setsid, stdout=PIPE,
             stderr=STDOUT)
print('#=== running test script: %r ===' % cmdline)
for line in test.stdout:
    sys.stdout.write(line)
# Wait for tests to exit
ret = test.wait()
print('#--- end of test script, status: %d' % ret)
if ret == 0:
    print("%s_underlying_tests: pass" % test_title)
else:
    print("# %s exited %d" % (' '.join(cmdline), ret))
    # typically "normal.expected_underlying_tests: fail"
    print("%s_underlying_tests: fail" % test_title)
    something_failed = True
# Give auditd time to log the entries.
sleep(3)
print('#=== %s ===' % basename(argv[1]))
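# Everything appended to audit.log since the seek above was produced while the
# test ran.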
log = audit_log.readlines()
print('#---8<--- raw output in audit log')
for line in log:
    print('# ' + line.rstrip('\n'))
print('#--->8---')
raw_expected = open(argv[1]).readlines()
print('#---8<--- expected output from aa_log_extract_tokens.pl')
for line in raw_expected:
    print('# ' + line.rstrip('\n'))
print('#--->8---')
expected = ''.join(raw_expected)
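# An expectation file may list several acceptable outputs; turn it into a list
# of alternatives if the separator is present.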
if ALTERNATIVE_SEPARATOR in expected:
    expected = expected.split(ALTERNATIVE_SEPARATOR)
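# Reduce the captured audit log to the REJECTING (denied-access) entries with
# aa_log_extract_tokens.pl so it can be compared against the expectation file.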
aa_parse = Popen(['/usr/bin/aa_log_extract_tokens.pl', 'REJECTING'],
                 stdin=PIPE,
                 stdout=PIPE)
output = aa_parse.communicate(input=''.join(log))[0].splitlines(True)
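# If the parser failed on the log as a whole, retry it one line at a time so a
# single entry it cannot handle does not hide the rest of the output.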
if aa_parse.returncode != 0:
    print('# aa_log_extract_tokens.pl failed, trying line-by-line...')
    output = []
    for line in log:
        aa_parse = Popen(['/usr/bin/aa_log_extract_tokens.pl', 'REJECTING'],
                         stdin=PIPE,
                         stdout=PIPE,
                         stderr=PIPE)
        stdout, stderr = aa_parse.communicate(input=line)
        output.extend(stdout.splitlines(True))
        for err_line in stderr.splitlines(True):
            output.append('E: ' + err_line)
        if aa_parse.returncode != 0:
            output.append('^ original line: %s' % line)
print('#---8<--- actual output from aa_log_extract_tokens.pl')
for line in output:
    if line:
        print('# ' + line.rstrip('\n'))
print('#--->8---')
matched_expectation = False
# We might have alternative expectations, take that into consideration.
if isinstance(expected, list):
    for i, alternative in enumerate(expected):
        if alternative == ''.join(output):
            print('# audit log matches alternative expectation %d/%d' %
                  (i + 1, len(expected)))
            matched_expectation = True
            break
elif expected == ''.join(output):
    matched_expectation = True
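# Emit the machine-readable verdict: "<title>: pass" if the denials matched an
# expectation, otherwise print a unified diff and record the failure.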
if matched_expectation:
    print('%s: pass' % test_title)
else:
    print('#---8<--- diff')
    for line in unified_diff(raw_expected, output, fromfile='expected',
                             tofile='actual'):
        print(line.rstrip('\n'))
    print('#--->8---')
    print('%s: fail' % test_title)
    something_failed = True
if something_failed:
    exit(1)