Skip to content
Snippets Groups Projects
Commit 94c9eedb authored by Luis Araujo's avatar Luis Araujo
Browse files

Initial import


Signed-off-by: Luis Araujo <luis.araujo@collabora.co.uk>
parents
No related branches found
No related tags found
No related merge requests found
Repository for Apertis test cases.
---
# Example Apertis test case definition (LAVA-style layout):
# `metadata` describes the test; `install`, `run` and `parse` are
# top-level sibling sections consumed by the test runner.
metadata:
  # Name of the test case.
  name: libXY-test
  # This is used to identify the test case format version.
  # LAVA tests use something like: "Lava-Test-Shell Test Definition 1.0"
  # For the Apertis project we could agree on using a single
  # version both for manual and automated tests: "Apertis Test Definition 1.0"
  format: "Apertis Test Definition 1.0"
  # Image type:
  # [ target, minimal, ostree, development, SDK, any ].
  image-type: target
  # Image architecture:
  # [ amd64, arm64, armhf, any ]
  image-arch: any
  # This can be used to define a series of test case types, and
  # could be any of the following values:
  # [ functional, sanity, system, unittest ]
  type: unittest
  # The execution type of this test case:
  # [ manual, automated ]
  exec-type: automated
  # This specifies the priority of the test case:
  # [ low, medium, high, critical ]
  priority: high
  # Description of the test case.
  description: "This is a test case for library libXY"
  # Maintainer of the test case.
  # It could include a project email (if available).
  maintainer: "Apertis Project <qa@apertis.org>"
  # List of hardware resources required to run this test case.
  resources:
    - "# Multitouch monitor (Model-XYZ)"
    - "# USB Bt dongle (Model-XYZ)"
  # A list of pre-conditions that need to be met before running this test case.
  # This can be basically anything required before starting to run the test,
  # for example: special privileges, repositories, command executions.
  pre-conditions:
    - "# Development repository should be enabled"
    - "# Ensure Rootfs is remounted as read/write"
    - sudo mount -o remount,rw /
  # The expected result for this test case.
  # This can be a set of statements explaining the expected
  # result or a copy/paste of the test commands' output.
  expected:
    - "# PASSED or FAILED"
  # Section to add any extra notes to this test case.
  # Links to examples or references could be added here too.
  notes:
    - "# Note 1"
    - "# Note 2"

# This directive is used to install package dependencies.
# Since OSTree images don't need to install packages, this directive will be
# omitted for new test cases, but it is still documented in this format for
# backward compatibility with previous non-ostree images.
install:
  deps:
    - libpkg-dep

# This directive opens the execution steps section.
# The format used in this directive follows the LAVA format.
run:
  # Steps to execute the test.
  steps:
    - "# Execute the following command."
    - common/run-test-in-systemd --timeout=900 --basename ./libXY-test-cmd.sh

# Parse test results from the test output.
# This directive is only relevant for automated tests.
parse:
  fixupdict:
    FAILED: fail
    PASSED: pass
  pattern: '(?P<test_case_id>[^:]+): (?P<result>[A-Z]+)'
---
# Skeleton template for a new Apertis test case.
# Scalar values are intentionally left blank (a bare `key:` parses as null)
# and are meant to be filled in when the template is instantiated.
metadata:
  name:
  format: "Apertis Test Definition 1.0"
  # [ target, minimal, ostree, development, SDK, any ].
  image-type:
  # [ amd64, arm64, armhf, any ]
  image-arch:
  # [ functional, sanity, system, unittest ]
  type:
  # [ manual, automated ]
  exec-type:
  # [ low, medium, high, critical ]
  priority:
  description:
  maintainer: "Apertis Project"
  resources:
  pre-conditions:
  expected:
  notes:

# Only valid for non-ostree images.
install:
  deps:

run:
  steps:
    - "# Execute the following command."
    - echo "Hello Apertis!"

# Parse test results from the test command output.
# This directive is only relevant for LAVA automated tests.
parse:
  fixupdict:
    FAILED: fail
    PASSED: pass
  pattern: '(?P<test_case_id>[^:]+): (?P<result>[A-Z]+)'
---
# Test case: verify gettext internationalization on the device under test (DUT).
metadata:
  name: gettext-i18n
  format: "Apertis Test Definition 1.0"
  image-type: any
  image-arch: any
  type: functional
  exec-type: automated
  priority: medium
  maintainer: "Apertis Project"
  description: "Check that gettext internationalization works."
  pre-conditions:
    - "# From a PC, download and unpack the test data tarball from the gitlab test repository: https://gitlab.apertis.org/tests/gettext-i18n/-/archive/master/gettext-i18n.tar.gz"
    - wget https://gitlab.apertis.org/tests/gettext-i18n/-/archive/master/gettext-i18n.tar.gz
    - tar -xvf gettext-i18n.tar.gz
    - "# Copy the gettext-i18n-master-* to the device"
    - DUT_IP=<device-ip>
    # Quoted: a plain scalar ending in ':' would otherwise be parsed as a
    # one-key mapping instead of a command string.
    - "scp -r gettext-i18n-master-* user@$DUT_IP:"
    - "# Log into the target"
    - ssh user@$DUT_IP
    - "# After log into the DUT, enter the test directory"
    - cd gettext-i18n-master-*
    - "# Note that the tarball may change depending on the release/branch being tested, please make sure to download the correct tarball for the release in question."
  expected:
    - "# The output should be pass or fail for each supported language."

run:
  steps:
    - "# Run the test script"
    - common/run-test-in-systemd --timeout=900 --basename ./gettext-i18n.sh

parse:
  fixupdict:
    FAILED: fail
    PASSED: pass
  pattern: '(?P<test_case_id>[^:]+): (?P<result>[A-Z]+)'
---
# Test case: exercise GStreamer audio/video decoders and demuxers on the DUT.
metadata:
  name: gstreamer1.0-decode
  format: "Apertis Test Definition 1.0"
  image-type: any
  image-arch: any
  type: functional
  exec-type: automated
  priority: high
  maintainer: "Apertis Project"
  # Folded block scalar: reads as one logical line at runtime.
  description: >-
    Test video and audio decoders and demuxers on a list of media.
    Audio/video decoding through GStreamer is important but not
    required by any of the currently strategic functionalities.
  pre-conditions:
    - "# From a PC, download and unpack the test data tarball from the gitlab test repository: https://gitlab.apertis.org/tests/gstreamer1.0-decode/-/archive/master/gstreamer1.0-decode-master.tar.gz"
    - wget https://gitlab.apertis.org/tests/gstreamer1.0-decode/-/archive/master/gstreamer1.0-decode-master.tar.gz
    - tar -xvf gstreamer1.0-decode-master.tar.gz
    - "# Copy the gstreamer1.0-decode-master-* to the device"
    - DUT_IP=<device-ip>
    # Quoted: a plain scalar ending in ':' would otherwise be parsed as a
    # one-key mapping instead of a command string.
    - "scp -r gstreamer1.0-decode-master-* user@$DUT_IP:"
    - "# Log into the target"
    - ssh user@$DUT_IP
    - "# After log into the DUT, enter the test directory"
    - cd gstreamer1.0-decode-master-*
    - "# Note that the tarball may change depending on the release/branch being tested, please make sure to download the correct tarball for the release in question."
  expected:
    - "# The script will output a result for each media file it tries to decode. If it succeeds, PASSED will be displayed. If not, FAILED will be displayed."
  notes:
    - "# If lava-test-shell test fails within lava job. It means e.g. a job has timed out."

run:
  steps:
    - "# Run the test script"
    - common/run-test-in-systemd --timeout=900 --basename bin/gstreamer1.0-decode.sh chaiwala-test-media

parse:
  fixupdict:
    FAILED: fail
    PASSED: pass
  # Single-quoted for consistency with the other documents and to keep the
  # backslash escapes (\W, \w) literal.
  pattern: '^TEST (?P<test_case_id>[^:]+):\W+(?P<result>\w+)'
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment