Skip to content
Snippets Groups Projects
Commit 786881af authored by Luis Araujo's avatar Luis Araujo
Browse files

Add tests for the renderer


This commit adds the unit test module for the renderer.

It includes tests for the parser, renderer and relevant formatting
methods. It also adds a set of files that are used by some of these
tests.

The commit also updates the README file explaining how to execute
these tests.

Signed-off-by: default avatarLuis Araujo <luis.araujo@collabora.co.uk>
parent eca3536b
No related branches found
No related tags found
No related merge requests found
......@@ -81,3 +81,19 @@ Now all the html tests cases should be available inside the
- The renderer parser will show warning messages for unrecognized values of
fields with multiple choices (for example, image-type, execution-type).
## Tests
The module `renderer/tests.py` contains tests for the parser, renderer and
different formatting methods. There is also a set of test files located inside
the `renderer/tests_files/` directory that are used by some of these tests.
The unittest can be executed from the `renderer/` directory like:
```
$ python3 -m unittest -v tests.py
```
It is highly recommended to execute these tests if changes are applied to any
of the `renderer` components since merge requests will only be accepted if all the
tests pass.
......@@ -159,7 +159,7 @@ def get_template_values(testcase_data):
return template_values
def generate_test_case(tc_file, directory):
def generate_test_case(tc_file, directory='.', return_out=False):
try:
with open(tc_file) as testcase:
tc_data = yaml.safe_load(testcase)
......@@ -168,7 +168,9 @@ def generate_test_case(tc_file, directory):
exit(1)
# Parse file to detect any syntax error.
print("Parsing file", tc_file)
if not return_out:
# Just print info line if output is not returned from the method.
print("Parsing file", tc_file)
try:
parse_format(tc_data)
except (ParserTypeError, ParserMissingFieldError) as error:
......@@ -179,6 +181,10 @@ def generate_test_case(tc_file, directory):
# Get template from environment and render it.
data = env.get_template('templates/index.html').render(get_template_values(tc_data))
# Return the data if return_out=True
if return_out:
return data
filename = os.path.splitext(os.path.basename(tc_file))[0] + ".html"
print("Generating test case page", filename)
with open(os.path.join(directory, filename), 'w') as html_file:
......
#!/usr/bin/env python3
###################################################################################
# Unit tests for the test case parser and renderer.
#
# Copyright (C) 2018
# Luis Araujo <luis.araujo@collabora.co.uk>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 US
###################################################################################
import yaml
import unittest
import os.path
import copy
from parser import parse_format
from renderer import parse_list, generate_test_case
from exceptions import ParserTypeError, ParserMissingFieldError
# Directory (relative to renderer/) that holds the YAML/HTML fixture files
# used by the tests below.
TESTS_FILES_PATH = "tests_files/"
def read_tc(filepath):
    """Load the test case YAML file at *filepath* and return the parsed data."""
    with open(filepath) as tc_yaml:
        return yaml.safe_load(tc_yaml)
def read_file(filepath):
    """Return the complete text content of the file at *filepath*."""
    with open(filepath) as rendered_page:
        return rendered_page.read()
class TestFieldTypes(unittest.TestCase):
    """
    Check that the parser raises ParserTypeError when a field holds a
    value of the wrong type.
    """
    def setUp(self):
        # Load a known-good test case; each test corrupts a single field.
        self.test_file = read_tc(os.path.join(TESTS_FILES_PATH, "test_file4.yaml"))
    def _assert_type_error(self, case):
        # Shared assertion: parsing the corrupted case must fail.
        with self.assertRaises(ParserTypeError):
            parse_format(case)
    def test_metadata_type(self):
        case = copy.deepcopy(self.test_file)
        case['metadata'] = "a test"
        self._assert_type_error(case)
    def test_name_type(self):
        case = copy.deepcopy(self.test_file)
        case['metadata']['name'] = [ 'one', 'two' ]
        self._assert_type_error(case)
    def test_description_type(self):
        case = copy.deepcopy(self.test_file)
        case['metadata']['description'] = [ 'one', 'two' ]
        self._assert_type_error(case)
    def test_expected_type(self):
        case = copy.deepcopy(self.test_file)
        case['metadata']['expected'] = "a test"
        self._assert_type_error(case)
    def test_run_type(self):
        case = copy.deepcopy(self.test_file)
        case['run'] = "a test"
        self._assert_type_error(case)
    def test_run_steps_type(self):
        case = copy.deepcopy(self.test_file)
        case['run']['steps'] = "a test"
        self._assert_type_error(case)
class TestMissingFields(unittest.TestCase):
    """
    Check that the parser raises ParserMissingFieldError when a mandatory
    field is missing from the test case file.
    """
    def setUp(self):
        # Load a known-good test case; each test removes one mandatory field.
        self.test_file = read_tc(os.path.join(TESTS_FILES_PATH, "test_file1.yaml"))
    def _assert_missing(self, tc_file):
        # Shared assertion: parsing a case with a missing field must fail.
        with self.assertRaises(ParserMissingFieldError):
            parse_format(tc_file)
    def test_missing_metadata_field(self):
        # Fixed: was a shallow self.test_file.copy(), which still shared the
        # nested dicts with the fixture; use deepcopy like the other tests.
        tc_file = copy.deepcopy(self.test_file)
        tc_file.pop('metadata')
        self._assert_missing(tc_file)
    def test_missing_name_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('name')
        self._assert_missing(tc_file)
    def test_missing_image_type_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('image-type')
        self._assert_missing(tc_file)
    def test_missing_image_arch_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('image-arch')
        self._assert_missing(tc_file)
    def test_missing_type_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('type')
        self._assert_missing(tc_file)
    def test_missing_exec_type_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('exec-type')
        self._assert_missing(tc_file)
    def test_missing_priority_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('priority')
        self._assert_missing(tc_file)
    def test_missing_description_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('description')
        self._assert_missing(tc_file)
    def test_missing_expected_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['metadata'].pop('expected')
        self._assert_missing(tc_file)
    def test_missing_run_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file.pop('run')
        self._assert_missing(tc_file)
    def test_missing_run_steps_field(self):
        tc_file = copy.deepcopy(self.test_file)
        tc_file['run'].pop('steps')
        self._assert_missing(tc_file)
class TestParser(unittest.TestCase):
    """
    Parse a set of complete, well-formed test case files end to end.
    """
    def _parse_ok(self, filename):
        # parse_format() returns a truthy value on a successful parse.
        data = read_tc(os.path.join(TESTS_FILES_PATH, filename))
        self.assertTrue(parse_format(data))
    def test_parsing_file1(self):
        self._parse_ok("test_file1.yaml")
    def test_parsing_file2(self):
        self._parse_ok("test_file2.yaml")
    def test_parsing_file3(self):
        self._parse_ok("test_file3.yaml")
    def test_parsing_file4(self):
        self._parse_ok("test_file4.yaml")
    def test_parsing_file5(self):
        self._parse_ok("test_file5.yaml")
class TestListFormat(unittest.TestCase):
    """
    Check the tuples produced by parse_list, the method that defines the
    format for the HTML lines.

    Each tuple holds one populated slot out of:
    (comment, command, output, image, web-link).
    """
    def test_comment_line(self):
        expected = [('A comment', '', '', '', '')]
        self.assertEqual(parse_list(["A comment"]), expected)
    def test_command_line(self):
        expected = [('', '$ ls -l', '', '', '')]
        self.assertEqual(parse_list(["$ ls -l"]), expected)
    def test_output_line(self):
        expected = [('', '', ['output line'], '', '')]
        self.assertEqual(parse_list([">output line"]), expected)
    def test_image_line(self):
        expected = [('', '', '', 'image.png', '')]
        self.assertEqual(parse_list(["@image.png"]), expected)
    def test_web_link_line(self):
        expected = [('', '', '', '', 'http://apertis.org')]
        self.assertEqual(parse_list(["~http://apertis.org"]), expected)
    # The automated_run=True flag changes how comments and commands are
    # recognized (automated steps carry no '$' prefix in the YAML).
    def test_automated_comment_line(self):
        expected = [(' A comment', '', '', '', '')]
        self.assertEqual(parse_list(["# A comment"], automated_run=True), expected)
    def test_automated_command_line(self):
        expected = [('', '$ ls -lt', '', '', '')]
        self.assertEqual(parse_list(["ls -lt"], automated_run=True), expected)
class TestRenderFile(unittest.TestCase):
    """
    Render test case YAML files and compare the output against the
    expected HTML pages stored alongside them.
    """
    def _assert_renders(self, basename):
        # return_out=True makes generate_test_case() return the rendered
        # page instead of writing it to disk.
        yaml_path = os.path.join(TESTS_FILES_PATH, basename + ".yaml")
        html_page = read_file(os.path.join(TESTS_FILES_PATH, basename + ".html"))
        self.assertEqual(generate_test_case(yaml_path, return_out=True), html_page)
    def test_render_file1(self):
        self._assert_renders("test_file1")
    def test_render_file3(self):
        self._assert_renders("test_file3")
    def test_render_file4(self):
        self._assert_renders("test_file4")
    def test_render_file5(self):
        self._assert_renders("test_file5")
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link href="css/bootstrap.min.css" rel="stylesheet">
<title>sanity-check</title>
</head>
<body>
<main role="main" class="container" style="margin-top: 40px; margin-bottom: 40px">
<h2>sanity-check <small class="text-muted">all</small></h2>
<h3><span class="badge badge-danger">critical</span></h3>
<div class="card" style="margin-top: 20px">
<div class="card-body">
<dl class="row">
<dt class="col-sm-3">Image Type:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Image Architecture:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Type:</dt>
<dd class="col-sm-9">sanity</dd>
</dl>
<h4>Description</h4>
<p>This test checks that the system bus is up, that a user session is created and that a graphical interface is successfully running in the system, as well as other similar basic services so that a stable and reliable environment can be confirmed before futher tests are executed. Basic functionality: services.</p>
<hr />
<h4>Pre Conditions</h4>
<ol>
<li class="mb-sm-2">A second system with a terminal and git available on it. This test must be executed before other tests and even before installing other packages in the image, so the script to execute this test should be fetched from a different machine.</li>
<li class="mb-sm-2">Clone the apertis-tests repository in the second system:</li>
<p><kbd>$ git clone https://gitlab.apertis.org/infrastructure/apertis-tests.git</kbd></p>
<li class="mb-sm-2">Copy the apertis-tests folder to the Apertis system. Since the Apertis image runs sshd by default, you can easily copy this file using the scp utility from the second machine, for this you will need the Apertis image IP, which you can find running the ip command from Apertis:</li>
<p><kbd>$ ip addr</kbd></p>
<li class="mb-sm-2">Replace &lt;APERTIS_IMAGE_IP&gt; by the found IP and copy the apertis-tests repository using scp from the second system where the apertis-tests repository was cloned. It would look like this:</li>
<p><kbd>$ tar cf apertis-tests.tar apertis-tests/ &amp;&amp; scp apertis-tests.tar user@&lt;APERTIS_IMAGE_IP&gt;:/home/user/</kbd></p>
<li class="mb-sm-2">Alternatively, you can copy the apertis-tests directory in other ways, for example by using an external storage device, manually mounting the device and copying the folder. It is recommended and expected that you use scp from a second machine.</li>
<li class="mb-sm-2">Once the apertis-tests folder is available in the Apertis image, unpack it in the apertis home user directory:</li>
<p><kbd>$ tar xf apertis-tests.tar</kbd></p>
<li class="mb-sm-2">Enter the apertis-tests directory</li>
<p><kbd>$ cd apertis-tests/</kbd></p>
</ol>
<hr />
<h4>Execution Steps</h4>
<ol>
<li class="mb-sm-2"># Execute the following command:</li>
<li class="mb-sm-2">common/sanity-check</li>
</ol>
<hr />
<h4>Expected</h4>
<p class="mt-sm-3">The command should report no failure and its output should be something like this at the end:</p>
<p class="mb-sm-0 pl-sm-3"><samp>+ grep -E ^NAME=(&#34;?)Apertis\1$ /etc/os-release</samp></p><p class="mb-sm-0 pl-sm-3"><samp>NAME=&#34;Apertis&#34;</samp></p><p class="mb-sm-0 pl-sm-3"><samp>+ grep -E ^ID=(&#34;?)apertis\1$ /etc/os-release</samp></p><p class="mb-sm-0 pl-sm-3"><samp>ID=apertis</samp></p><p class="mb-sm-0 pl-sm-3"><samp>+ id -u user</samp></p><p class="mb-sm-0 pl-sm-3"><samp>+ test -S /run/user/1000/wayland-0</samp></p><p class="mb-sm-0 pl-sm-3"><samp>+ set +x</samp></p><p class="mb-sm-0 pl-sm-3"><samp># Sanity check successful</samp></p>
</div>
</div>
<div class="card" style="margin-top: 30px">
<div class="card-body">
<h4>Notes</h4>
<ul>
<li class="mb-sm-2">IMPORTANT: If this test fails for an image, NO further tests should be executed on that image, since this invalidates all test results.</li>
<li class="mb-sm-2">This test must be executed in an image before running either automated or manual tests.</li>
</ul>
</div>
</div>
</main>
</body>
</html>
\ No newline at end of file
metadata:
name: sanity-check
format: "Apertis Test Definition 1.0"
image-type: any
image-arch: any
type: sanity
exec-type: all
priority: critical
maintainer: "Apertis Project"
description: "This test checks that the system bus is up, that a user session is
created and that a graphical interface is successfully running in
the system, as well as other similar basic services so that a
stable and reliable environment can be confirmed before futher
tests are executed.
Basic functionality: services."
pre-conditions:
- "A second system with a terminal and git available on it. This test must be
executed before other tests and even before installing other packages in
the image, so the script to execute this test should be fetched from a
different machine."
- "Clone the apertis-tests repository in the second system:"
- $ git clone https://gitlab.apertis.org/infrastructure/apertis-tests.git
- "Copy the apertis-tests folder to the Apertis system. Since the Apertis
image runs sshd by default, you can easily copy this file using the scp
utility from the second machine, for this you will need the Apertis image
IP, which you can find running the ip command from Apertis:"
- $ ip addr
- "Replace <APERTIS_IMAGE_IP> by the found IP and copy the apertis-tests
repository using scp from the second system where the apertis-tests
repository was cloned. It would look like this:"
- $ tar cf apertis-tests.tar apertis-tests/ && scp apertis-tests.tar user@<APERTIS_IMAGE_IP>:/home/user/
- "Alternatively, you can copy the apertis-tests directory in other ways, for
example by using an external storage device, manually mounting the device
and copying the folder. It is recommended and expected that you use scp
from a second machine."
- "Once the apertis-tests folder is available in the Apertis image, unpack it
in the apertis home user directory:"
- $ tar xf apertis-tests.tar
- "Enter the apertis-tests directory"
- $ cd apertis-tests/
expected:
- "The command should report no failure and its output should be something
like this at the end:"
- |
>+ grep -E ^NAME=("?)Apertis\1$ /etc/os-release
NAME="Apertis"
+ grep -E ^ID=("?)apertis\1$ /etc/os-release
ID=apertis
+ id -u user
+ test -S /run/user/1000/wayland-0
+ set +x
# Sanity check successful
notes:
- "IMPORTANT: If this test fails for an image, NO further tests should be
executed on that image, since this invalidates all test results."
- "This test must be executed in an image before running either automated or
manual tests."
run:
steps:
- "# Execute the following command:"
- common/sanity-check
parse:
pattern: 'TEST_RESULT:(?P<result>\w+):(?P<test_case_id>[^:]+):'
metadata:
name: webkit2gtk-actor-tiles
format: "Apertis Test Definition 1.0"
image-type: target
image-arch: any
type: functional
exec-type: manual
priority: medium
maintainer: "Apertis Project"
description: "Test implementation of actor-based tiled backing store in
webkit2gtk."
resources:
- "Mouse or touchscreen."
macro_install_packages_preconditions: webkit2gtk-testing
expected:
- "This test will be considered a PASS if it works with at least one of mouse
or touchpad because this is not a test for the mouse or touchpad working."
- "The page will look like this after the first load:"
- "@Actor-tiles-01.png"
- "After clicking the first link the page will look like this:"
- "@Actor-tiles-02.png"
- "After clicking the second link the page will look like this:"
- "@Actor-tiles-03.png"
- "The new page that will be loaded looks like this:"
- "@Actor-tiles-04.png"
- "Going back should give you the same rendering you had before clicking the
link to the new page, except the link will be colored purple."
- "@Actor-tiles-05.png"
- "After clicking the link that will take you to the beginning of the page and
repeating the process, the renderings should be the same as the ones above."
- "Note that when a page starts loading some of the tiles may contain a
checker board; that is normal, but we plan to improve on it."
run:
steps:
- "Launch the test application with each URL in resources with and without
CLUTTER_PAINT=paint-volumes:"
- $ env CLUTTER_PAINT=paint-volumes GtkClutterLauncher file:///usr/share/webkit2gtk/testing/actor-tiles-test.html
- "Check that the rendering of the page matches the reference in the results
section."
- "Click the link that will scroll the page a bit"
- "Check that the rendering of the page matches the reference in the results
section"
- "Click the link that will scroll to the very bottom"
- "Check that the rendering of the page matches the reference in the results
section"
- "Click the first link that will take you to a different page"
- "Check that the rendering of the page matches the reference in the results
section"
- "Click the back button of the browser"
- "Check that the rendering of the page matches the reference in the results
section"
- "Click the second link, that will send you to the bottom"
- "Follow the links again until you get to the end of the page again, always
making sure the rendering matches the reference rendering"
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link href="css/bootstrap.min.css" rel="stylesheet">
<title>bluez-avrcp-volume</title>
</head>
<body>
<main role="main" class="container" style="margin-top: 40px; margin-bottom: 40px">
<h2>bluez-avrcp-volume <small class="text-muted">manual</small></h2>
<h3><span class="badge badge-info">medium</span></h3>
<div class="card" style="margin-top: 20px">
<div class="card-body">
<dl class="row">
<dt class="col-sm-3">Image Type:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Image Architecture:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Type:</dt>
<dd class="col-sm-9">functional</dd>
</dl>
<h4>Description</h4>
<p>Test the AVRCP volume up and down commands.</p>
<hr />
<h4>Resources</h4>
<ul>
<li>A Bluetooth adapter</li>
<li>An A2DP Source and AVRCP capable phone (Nokia N9 — do not use an Android 4.2 phone as their Bluetooth AVRCP support is broken)</li>
<li>Note that you do not need to play any music on the phone; nor do you need to have headphones or a speaker plugged into the Apertis device.</li>
</ul>
<hr />
<h4>Pre Conditions</h4>
<ol>
<li class="mb-sm-2">Ensure Rootfs is remounted as read/write.</li>
<p><kbd>$ sudo mount -o remount,rw /</kbd></p>
<li class="mb-sm-2">Install dependencies</li>
<p><kbd>$ sudo apt install apertis-tests python3-dbus python3-gi</kbd></p>
<li class="mb-sm-2">Restart the system to restore the filesystem state to read-only before running the test.</li>
<p><kbd>$ sudo reboot</kbd></p>
</ol>
<hr />
<h4>Execution Steps</h4>
<ol>
<li class="mb-sm-2">Ensure PulseAudio is activated:</li>
<p><kbd>$ pactl stat</kbd></p>
<li class="mb-sm-2">Run btmon before any connection happens:</li>
<p><kbd>$ sudo btmon | grep -A4 &#39;AV/C: Control&#39;</kbd></p>
<li class="mb-sm-2">Start simple agent:</li>
<p><kbd>$ /usr/lib/chaiwala-tests/bluez/simple-agent -y</kbd></p>
<li class="mb-sm-2">Pair both devices initiating from the phone side</li>
<li class="mb-sm-2">Pay attention to the simple-agent window as it requires input during the pairing process.</li>
<li class="mb-sm-2">In another terminal, execute (You can get the Bluetooth device address by running hcitool scan):</li>
<p><kbd>$ /usr/lib/chaiwala-tests/bluez/test-avrcp.py -i hci0 device_address</kbd></p>
</ol>
<hr />
<h4>Expected</h4>
<p class="mt-sm-3">If success, the following output should be generated by btmon:</p>
<p class="mb-sm-0 pl-sm-3"><samp>AV/C: Control: address 0x48 opcode 0x7c</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Subunit: Panel</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Opcode: Passthrough</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Operation: 0x42 (VOLUME DOWN Pressed)</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Length: 0x00</samp></p><p class="mb-sm-0 pl-sm-3"><samp>--</samp></p><p class="mb-sm-0 pl-sm-3"><samp>AV/C: Control: address 0x48 opcode 0x7c</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Subunit: Panel</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Opcode: Passthrough</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Operation: 0x41 (VOLUME UP Pressed)</samp></p><p class="mb-sm-0 pl-sm-3"><samp>Length: 0x00</samp></p>
</div>
</div>
</main>
</body>
</html>
\ No newline at end of file
metadata:
name: bluez-avrcp-volume
format: "Apertis Test Definition 1.0"
image-type: any
image-arch: any
type: functional
exec-type: manual
priority: medium
maintainer: "Apertis Project"
description: "Test the AVRCP volume up and down commands."
resources:
- "A Bluetooth adapter"
- "An A2DP Source and AVRCP capable phone (Nokia N9 do not use an
Android 4.2 phone as their Bluetooth AVRCP support is broken)"
- "Note that you do not need to play any music on the phone; nor do you need
to have headphones or a speaker plugged into the Apertis device."
macro_install_packages_preconditions: apertis-tests python3-dbus python3-gi
expected:
- "If success, the following output should be generated by btmon:"
- |
>AV/C: Control: address 0x48 opcode 0x7c
Subunit: Panel
Opcode: Passthrough
Operation: 0x42 (VOLUME DOWN Pressed)
Length: 0x00
--
AV/C: Control: address 0x48 opcode 0x7c
Subunit: Panel
Opcode: Passthrough
Operation: 0x41 (VOLUME UP Pressed)
Length: 0x00
run:
steps:
- "Ensure PulseAudio is activated:"
- $ pactl stat
- "Run btmon before any connection happens:"
- "$ sudo btmon | grep -A4 'AV/C: Control'"
- "Start simple agent:"
- $ /usr/lib/chaiwala-tests/bluez/simple-agent -y
- "Pair both devices initiating from the phone side"
- "Pay attention to the simple-agent window as it requires input during
the pairing process."
- "In another terminal, execute (You can get the Bluetooth device address by
running hcitool scan):"
- $ /usr/lib/chaiwala-tests/bluez/test-avrcp.py -i hci0 device_address
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link href="css/bootstrap.min.css" rel="stylesheet">
<title>apparmor</title>
</head>
<body>
<main role="main" class="container" style="margin-top: 40px; margin-bottom: 40px">
<h2>apparmor <small class="text-muted">automated</small></h2>
<h3><span class="badge badge-danger">critical</span></h3>
<div class="card" style="margin-top: 20px">
<div class="card-body">
<dl class="row">
<dt class="col-sm-3">Image Type:</dt>
<dd class="col-sm-9">target</dd>
<dt class="col-sm-3">Image Architecture:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Type:</dt>
<dd class="col-sm-9">functional</dd>
</dl>
<h4>Description</h4>
<p>Security infrastructure: checking that AppArmor is available in the kernel and active is part of strategic application and services confinement.</p>
<hr />
<h4>Pre Conditions</h4>
<ol>
<li class="mb-sm-2">From a PC, download and unpack the test data tarball from the gitlab test repository:</li>
<p><kbd>$ wget https://gitlab.apertis.org/tests/apparmor/-/archive/master/apparmor.tar.gz</kbd></p>
<p><kbd>$ tar -xvf apparmor.tar.gz</kbd></p>
<li class="mb-sm-2">Copy the apparmor-master-* to the device</li>
<p><kbd>$ DUT_IP=&ltdevice-ip&gt</kbd></p>
<p><kbd>$ scp -r apparmor-master-* user@$DUT_IP:</kbd></p>
<li class="mb-sm-2">Log into the target</li>
<p><kbd>$ ssh user@$DUT_IP</kbd></p>
<li class="mb-sm-2">After log into the DUT, enter the test directory</li>
<p><kbd>$ cd apparmor-master-*</kbd></p>
<li class="mb-sm-2">Note that the tarball may change depending on the release/branch being tested, please make sure to download the correct tarball for the release in question.</li>
</ol>
<hr />
<h4>Execution Steps</h4>
<ol>
<li class="mb-sm-2"> Execute the following command:</li>
<p><kbd>$ $ common/run-test-in-systemd --basename --timeout 3000 ./apparmor.sh</kbd></p>
</ol>
<hr />
<h4>Expected</h4>
<p class="mt-sm-3">The apparmor.sh script should return 0. The return value indicates the number of tests which failed. The ouput should be similar to the following example:</p>
<p class="mb-sm-0 pl-sm-3"><samp>access.sh: PASSED - /tmp/sdtest.4283-31579-rMuWBs</samp></p><p class="mb-sm-0 pl-sm-3"><samp>capabilities.sh: FAILED - /tmp/sdtest.4548-350-UPHWk3</samp></p>
<p class="mt-sm-3">Although extraneous lines might been shown in case of error or for debug. Those won&#39;t interfere with the test parser.</p>
</div>
</div>
<div class="card" style="margin-top: 30px">
<div class="card-body">
<h4>Notes</h4>
<ul>
<li class="mb-sm-2">Make sure that you have disconnect the ethernet connection to the target before you start the tethering process.</li>
<li class="mb-sm-2">This testcase groups upstream developed tests mostly and therefore it&#39;s not runnning any Apertis specific test.</li>
</ul>
</div>
</div>
</main>
</body>
</html>
\ No newline at end of file
metadata:
name: apparmor
format: "Apertis Test Definition 1.0"
image-type: target
image-arch: any
type: functional
exec-type: automated
priority: critical
maintainer: "Apertis Project"
description: "Security infrastructure: checking that AppArmor is available in
the kernel and active is part of strategic application and
services confinement."
macro_ostree_preconditions: apparmor
expected:
- "The apparmor.sh script should return 0. The return value indicates the
number of tests which failed. The ouput should be similar to the following
example:"
- |
>access.sh: PASSED - /tmp/sdtest.4283-31579-rMuWBs
capabilities.sh: FAILED - /tmp/sdtest.4548-350-UPHWk3
- "Although extraneous lines might been shown in case of error or for debug.
Those won't interfere with the test parser."
notes:
- "Make sure that you have disconnect the ethernet connection to the target
before you start the tethering process."
- "This testcase groups upstream developed tests mostly and therefore it's not
runnning any Apertis specific test."
run:
steps:
- "# Execute the following command:"
- $ common/run-test-in-systemd --basename --timeout 3000 ./apparmor.sh
parse:
fixupdict:
FAILED: fail
PASSED: pass
SKIP: skip
pattern: ^(?P<test_case_id>.+)\.sh:\s*(?P<result>PASSED|FAILED|SKIP)\s*-.*
<!doctype html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1, shrink-to-fit=no">
<link href="css/bootstrap.min.css" rel="stylesheet">
<title>ribchester</title>
</head>
<body>
<main role="main" class="container" style="margin-top: 40px; margin-bottom: 40px">
<h2>ribchester <small class="text-muted">automated</small></h2>
<h3><span class="badge badge-danger">critical</span></h3>
<div class="card" style="margin-top: 20px">
<div class="card-body">
<dl class="row">
<dt class="col-sm-3">Image Type:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Image Architecture:</dt>
<dd class="col-sm-9">any</dd>
<dt class="col-sm-3">Type:</dt>
<dd class="col-sm-9">functional</dd>
</dl>
<h4>Description</h4>
<p>Ribchester library test.</p>
<hr />
<h4>Pre Conditions</h4>
<ol>
<li class="mb-sm-2">Ensure Rootfs is remounted as read/write.</li>
<p><kbd>$ sudo mount -o remount,rw /</kbd></p>
<li class="mb-sm-2">Install dependencies</li>
<p><kbd>$ sudo apt install build-essential devscripts gnome-desktop-testing python3-debian ribchester-dev ribchester-tests</kbd></p>
<li class="mb-sm-2">Restart the system to restore the filesystem state to read-only before running the test.</li>
<p><kbd>$ sudo reboot</kbd></p>
<li class="mb-sm-2">Download the ribchester binary inside the /tmp directory.</li>
<p><kbd>$ cd /tmp/</kbd></p>
<p><kbd>$ apt source ribchester</kbd></p>
<p><kbd>$ chown user:user -R /tmp/ribchester-*</kbd></p>
<li class="mb-sm-2">Clone the apertis-tests git repository:</li>
<p><kbd>$ git clone https://gitlab.apertis.org/infrastructure/apertis-tests.git</kbd></p>
<li class="mb-sm-2">Enter the tests directory and follow the execution steps.</li>
<p><kbd>$ cd apertis-tests/</kbd></p>
</ol>
<hr />
<h4>Execution Steps</h4>
<ol>
<li class="mb-sm-2"> Execute the following command:</li>
<p><kbd>$ common/run-test-in-systemd --timeout=900 --chdir /tmp/ribchester-* --user=user --name=ribchester -- sadt --verbose</kbd></p>
</ol>
<hr />
<h4>Expected</h4>
<p class="mt-sm-3">All tests should pass (ok). The output should be similar to:</p>
<p class="mb-sm-0 pl-sm-3"><samp>test.name0...ok</samp></p><p class="mb-sm-0 pl-sm-3"><samp>test.name1...ok</samp></p><p class="mb-sm-0 pl-sm-3"><samp>test.name2...ok</samp></p><p class="mb-sm-0 pl-sm-3"><samp>test.nameN...ok</samp></p>
</div>
</div>
</main>
</body>
</html>
\ No newline at end of file
metadata:
name: ribchester
format: "Apertis Test Definition 1.0"
image-type: any
image-arch: any
type: functional
exec-type: automated
priority: critical
maintainer: "Apertis Project"
description: "Ribchester library test."
macro_modules_preconditions: ribchester
expected:
- "All tests should pass (ok). The output should be similar to:"
- |
>test.name0...ok
test.name1...ok
test.name2...ok
test.nameN...ok
install:
deps:
- build-essential
- devscripts
- gnome-desktop-testing
- python3-debian
- ribchester-dev
- ribchester-tests
steps:
# Download in /tmp directory
- cd /tmp/
- apt source ribchester
- chown user:user -R /tmp/ribchester-*
run:
steps:
- "# Execute the following command:"
- common/run-test-in-systemd --timeout=900 --chdir /tmp/ribchester-* --user=user --name=ribchester -- sadt --verbose
parse:
pattern: '^(?P<test_case_id>[a-zA-Z0-9_\-\./]+)\s...\s(?P<result>ok|FAIL|SKIP)'
fixupdict:
ok: pass
FAIL: fail
SKIP: skip
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment