Forked from
infrastructure / apertis-image-recipes
675 commits behind the upstream repository.
-
Emanuele Aina authored
Bring back the automatic check for production builds so that images are put in their main location and test jobs are submitted to LAVA, but mangle the LAVA webhook callback URL so that we don't create too much noise in Phabricator at this point. Signed-off-by:
Emanuele Aina <emanuele.aina@collabora.com>
Emanuele Aina authored: Bring back the automatic check for production builds so that images are put in their main location and test jobs are submitted to LAVA, but mangle the LAVA webhook callback URL so that we don't create too much noise in Phabricator at this point. Signed-off-by:
Emanuele Aina <emanuele.aina@collabora.com>
Jenkinsfile 20.56 KiB
#!/usr/bin/env groovy
// Distribution name and release codename; used to compose artifact names,
// OSTree branches and upload paths throughout this pipeline.
osname = 'apertis'
release = "next"
/* Determine whether to run uploads based on the prefix of the job name; in
* case of apertis we expect the official jobs under apertis-<release>/ while
* non-official ones can be in e.g. playground/ */
def production = env.JOB_NAME.startsWith("${osname}-")
// Docker registry and image providing the build environment (debos & co).
docker_registry_name = 'docker-registry.apertis.org'
docker_image_name = "${docker_registry_name}/apertis/apertis-19.03-image-builder"
// rsync/ssh destination for artifact uploads; production builds land in the
// public tree, anything else under test/<job name>.
upload_host = "archive@images.apertis.org"
upload_root = "/srv/images/" + (production ? "public" : "test/${env.JOB_NAME}")
upload_dest = "${upload_host}:${upload_root}"
upload_credentials = '5a23cd79-e26d-41bf-9f91-d756c131b811'
// Public HTTP prefix mirroring upload_root; used for OSTree pulls and the
// sysroot metadata files.
image_url_prefix = "https://images.apertis.org" + (production ? "" : "/test/${env.JOB_NAME}")
ostree_path = "ostree/repo/"
ostree_pull_url = image_url_prefix + "/${ostree_path}"
// LAVA test submission: test-case repository and Jenkins credential ids.
test_repo_url = "git@gitlab.apertis.org:infrastructure/apertis-tests.git"
test_repo_credentials = 'df7b609b-df30-431d-a942-af263af80571'
test_repo_branch = 'master'
test_lava_credentials = 'apertis-lava-user'
// Extra payload templated into the debos recipes of demo-enabled images.
demopack = "https://images.apertis.org/media/multimedia-demo.tar.gz"
sysroot_url_prefix = image_url_prefix + "/sysroot/"
/* Build matrix: per-architecture board list and artifact types.
 * Per-type keys:
 *   args     -- extra arguments appended to the debos invocations
 *   image    -- build flashable apt-based image(s) for each board
 *   sysroot  -- build a sysroot tarball for the SDK
 *   ostree   -- build OSTree-based image(s) plus the LXC container
 *   boards   -- optional per-type override of the architecture board list
 *   requires -- artifacts from other architectures this type depends on;
 *               such types are scheduled into the second build pass
 * The large commented-out regions temporarily disable most artifact types,
 * leaving only the "minimal" builds active.
 */
def architectures = [
amd64: [
boards: ['uefi'],
types: [
minimal: [
args: "-t demopack:${demopack}",
image: true,
sysroot: false,
//ostree: true,
],/*
target: [
args: "-t demopack:${demopack}",
image: true,
sysroot: false,
ostree: true,
],
sysroot: [
args: '--scratchsize 10G',
image: false,
sysroot: true,
ostree: false,
],
sdk: [
args: "-t demopack:${demopack} -t sampleappscheckout:enabled --scratchsize 10G",
boards: [ 'sdk' ],
image: true,
sysroot: false,
ostree: false,
requires: [
armhf: 'devroot',
]
],
basesdk: [
args: '--scratchsize 10G',
boards: [ 'sdk' ],
image: true,
sysroot: false,
ostree: false,
requires: [
armhf: 'devroot',
]
]*/
]
],
arm64: [
boards: ['uboot'],
types: [
minimal: [
args: "-t demopack:${demopack}",
image: true,
sysroot: false,
//ostree: true,
],/*
target: [
args: "-t demopack:${demopack}",
image: true,
sysroot: false,
ostree: true,
],
sysroot: [
args: '--scratchsize 10G',
image: false,
sysroot: true,
ostree: false,
]*/
]
],
armhf: [
boards: ['uboot'],
types: [
minimal: [
args: "-t demopack:${demopack}",
image: true,
sysroot: false,
//ostree: true
],/*
sysroot: [
args: '--scratchsize 10G',
image: false,
sysroot: true,
ostree: false
],
devroot: [
args: '--scratchsize 10G',
image: false,
sysroot: false,
ostree: false
]*/
]
]
]
// Expose a "buildOnly" job parameter so manual runs can restrict the
// pipeline to a subset of architecture/type combinations.
properties([
parameters([
string(name: 'buildOnly', defaultValue: '', description: 'If set, only the selected images are built. Comma and slash separated, e.g. armhf/minimal, amd64/target', trim: true),
])
])
/* Parse the buildOnly parameter ("arch/type, arch, ...") into a map of
 * architecture -> list of requested types; an architecture listed without
 * a type selects all of its types.
 * Every step uses safe navigation plus an Elvis default: on the very first
 * run of a freshly created job params.buildOnly can still be null because
 * properties() has not taken effect yet, and the original
 * `params?.buildOnly.tokenize(...)` would throw a NullPointerException. */
def buildOnlyList = params?.buildOnly?.tokenize(/, ?/)?.collect { it.tokenize('/') } ?: []
buildOnlyList = buildOnlyList
.groupBy{ it[0] }
.collectEntries{ arch, values -> [arch, values.findResults { it[1] } ] }
/* Restrict the build matrix to what buildOnly selected: keep only the
 * requested architectures and, within each, only the requested types
 * (an architecture given without types keeps all of its types). */
def buildCandidates = architectures
if (buildOnlyList) {
buildCandidates = buildCandidates
// filter out architectures which won't be built
.subMap(buildOnlyList.keySet())
.collectEntries{ arch, architecture ->
// filter out artifact types which won't be built
// 'def' keeps this temporary out of the shared script binding
def types = architecture.types
if (buildOnlyList[arch])
types = architecture.types.subMap(buildOnlyList[arch])
[arch, architecture + [
types: types
]]
}
}
/* Copy a local directory to the image server over rsync/ssh.
 *
 * @source -- local path; interpolated unquoted into the rsync command, so
 *            shell globs are expanded
 * @target -- destination directory relative to upload_root; created on the
 *            remote side if missing
 */
def uploadDirectory(source, target) {
sshagent(credentials: [ upload_credentials, ] ) {
sh(script: """
ssh -oStrictHostKeyChecking=no ${upload_host} mkdir -p ${upload_root}/${target}/
rsync -e 'ssh -oStrictHostKeyChecking=no' -aP ${source} ${upload_dest}/${target}/
""")
}
}
/* Publish the per-build local OSTree repo to the image server.
 *
 * If the remote repository already exists only the new branch is pushed
 * via ostree-push; otherwise the whole local repo is rsync'ed, with
 * --ignore-existing to mitigate races between parallel branches doing the
 * same first upload. The local repo is deleted afterwards to free space.
 */
def pushOstreeRepo(architecture, type, board) {
def repo = "repo-${architecture}-${board}-${type}/"
def branch = "${osname}/${release}/${architecture}-${board}/${type}"
// push if the destination repository already exists, otherwise do a full repo upload
// but use --ignore-existing to mitigate the chance of races
sshagent(credentials: [ upload_credentials, ] ) {
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
if ssh -oStrictHostKeyChecking=no ${upload_host} test -e ${upload_root}/${ostree_path}
then
ostree-push --repo ${repo} ${upload_dest}/${ostree_path} ${branch}
else
ostree remote --repo=${repo} delete origin # drop the remote before publishing
ssh -oStrictHostKeyChecking=no ${upload_host} mkdir -p ${upload_root}/${ostree_path}/
rsync -e 'ssh -oStrictHostKeyChecking=no' --ignore-existing -aP ${repo} ${upload_dest}/${ostree_path}
fi""")
}
// Cleanup uploaded branch
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
rm -rf ${repo}""")
}
/* Submit LAVA test jobs for one built image.
 *
 * Checks out the apertis-tests repository to obtain the profile templates,
 * then invokes lava-submit with the profile matching the image variant.
 * NOTE: the callback URL is deliberately prefixed with
 * "HACK-TO-TEMPORARILY-DISABLE-BUG-REPORTING-" so that LAVA results do not
 * reach the Phabricator bridge for now.
 *
 * @ostree -- true to test the OSTree variant of the image
 */
def submitTests(architecture, type, board, ostree = false) {
def image_name = imageName(architecture, type, board, ostree)
def version = env.PIPELINE_VERSION
def name = osname
if(ostree){
name = "${osname}_ostree"
}
def profile_name = "${name}-${type}-${architecture}-${board}"
dir ("apertis-tests") {
git(url: test_repo_url,
poll: false,
credentialsId: test_repo_credentials,
branch: test_repo_branch)
}
withCredentials([ file(credentialsId: test_lava_credentials, variable: 'lqaconfig'),
string(credentialsId: 'lava-phab-bridge-token', variable: 'token')]) {
sh(script: """
/usr/bin/lava-submit -c ${lqaconfig} \
-g apertis-tests/templates/profiles.yaml \
--profile ${profile_name} \
--callback-secret ${token} \
--callback-url HACK-TO-TEMPORARILY-DISABLE-BUG-REPORTING-https://lavaphabbridge.apertis.org/ \
-t release:${release} \
-t image_date:${version} \
-t image_name:${image_name}""")
}
}
/* Commit an ospack into a local OSTree repository.
 *
 * Creates a fresh archive-z2 repo, then mirrors the already-published
 * branch from ostree_pull_url when it exists (HTTP 200) so commit history
 * is preserved; HTTP 404 means first publication and is tolerated, any
 * other status aborts. Afterwards the debos ostree-commit recipe commits
 * the ospack, and an inline static delta (from empty) is generated next to
 * the image for offline installs.
 *
 * @repo -- directory name of the local repository to (re)create
 */
def buildOStree(architecture, type, board, debosarguments = "", repo = "repo") {
def image_name = imageName(architecture, type, board, true)
def branch = "${osname}/${release}/${architecture}-${board}/${type}"
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
rm -rf ${repo}
mkdir ${repo}
ostree init --repo=${repo} --mode archive-z2
ostree remote --repo=${repo} add --no-gpg-verify origin ${ostree_pull_url}
http_code=\$(curl --location --silent -o /dev/null --head -w "%{http_code}" ${ostree_pull_url}/refs/heads/${branch})
case \$http_code in
200)
ostree pull --repo=${repo} --depth=-1 --mirror --disable-fsync origin ${branch}
;;
404)
;;
*)
echo "Error: Got HTTP \$http_code trying to fetch ${ostree_pull_url}/refs/heads/${branch}"
exit 1
;;
esac
""")
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
debos ${debosarguments} \
--show-boot \
-t architecture:${architecture} \
-t type:$type \
-t board:$board \
-t suite:$release \
-t ospack:ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION} \
-t image:${image_name} \
-t message:${release}-${type}-${architecture}-${board}_${PIPELINE_VERSION} \
-t ostree:${repo} \
${WORKSPACE}/${osname}-ostree-commit.yaml""")
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
ostree --repo=${repo} static-delta generate \
--empty \
--to=${branch} \
--inline \
--min-fallback-size=1024 \
--filename ${image_name}.delta""")
}
/** Generate the image name
 *
 * Single place for image-name generation so the build, upload and
 * test-submission steps all agree on the artifact file names.
 *
 * @return string with the name
 */
def imageName(architecture, type, board, ostree = false){
    def prefix = ostree ? "${osname}_ostree" : osname
    return "${prefix}_${release}-${type}-${architecture}-${board}_${PIPELINE_VERSION}"
}
////////////////// High-level tasks //////////////////
/* Build the OS pack (root filesystem tarball) for one architecture/type
 * via the matching debos ospack recipe. All other artifacts (images,
 * sysroots, OSTree commits) are derived from this ospack. */
def buildOSpack(architecture, type, debosarguments = "") {
stage("${architecture} ${type} ospack") {
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
debos ${debosarguments} \
--show-boot \
-t type:${type} \
-t architecture:${architecture} \
-t suite:${release} \
-t timestamp:${PIPELINE_VERSION} \
-t ospack:ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION} \
${WORKSPACE}/${osname}-ospack-${type}.yaml""")
}
}
/* Build a flashable apt-based image for one board from the previously
 * built ospack, using the per-board debos image recipe. */
def buildImage(architecture, type, board, debosarguments = "") {
// Build image name here so it can easily be re-used by phases.
def image_name = imageName(architecture, type, board, false)
stage("${architecture} ${type} ${board} image") {
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
debos ${debosarguments} \
--show-boot \
-t architecture:${architecture} \
-t type:${type} \
-t ospack:ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION} \
-t imageroot:${image_url_prefix}/daily/${release} \
-t suite:$release \
-t timestamp:${PIPELINE_VERSION} \
-t image:${image_name} \
${WORKSPACE}/${osname}-image-${board}.yaml""")
}
}
/* Build an OSTree-based image for one board: first commit the ospack into
 * a per-board local repo (buildOStree), then run the per-board OSTree
 * image recipe against that repo. */
def buildOStreeImage(architecture, type, board, debosarguments = "") {
def repo = "repo-${architecture}-${board}-${type}"
def image_name = imageName(architecture, type, board, true)
stage("${architecture} ${type} ${board} OStree image build") {
buildOStree(architecture, type, board, debosarguments, repo)
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
debos ${debosarguments} \
--show-boot \
-t architecture:${architecture} \
-t type:$type \
-t board:$board \
-t suite:$release \
-t ospack:ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION} \
-t image:${image_name} \
-t message:${release}-${type}-${architecture}-${board}_${PIPELINE_VERSION} \
-t ostree:${repo} \
${WORKSPACE}/apertis-ostree-image-${board}.yaml;""")
}
}
/* Build the OSTree commit plus an OSTree-based container tarball for the
 * given architecture/type; callers pass "lxc" as the board name.
 *
 * Fix: the message tag previously expanded to
 * ${release}-${type}-${architecture}-${type}-${board} (type repeated),
 * inconsistent with the identical templates in buildOStree and
 * buildOStreeImage; the duplicate ${type} has been dropped.
 */
def buildContainer(architecture, type, board, debosarguments = "") {
def repo = "repo-${architecture}-${board}-${type}"
buildOStree(architecture, type, board, debosarguments, repo)
stage("${architecture} ${type} ${board} OStree pack") {
sh(script: """
cd ${PIPELINE_VERSION}/${architecture}/${type}
debos ${debosarguments} \
--show-boot \
-t architecture:${architecture} \
-t type:$type \
-t suite:$release \
-t repourl:${ostree_pull_url} \
-t osname:${osname} \
-t branch:${osname}/$release/${architecture}-${board}/${type} \
-t ospack:${osname}_ostree_${release}-${type}-${architecture}-${board}_${PIPELINE_VERSION} \
-t message:${release}-${type}-${architecture}-${board}_${PIPELINE_VERSION} \
-t ostree:${repo} \
${WORKSPACE}/${osname}-ostree-pack.yaml""")
}
}
/* Build a sysroot tarball for the SDK from the ospack, plus the metadata
 * file advertising its version and download URL.
 *
 * @architecture -- target architecture the sysroot is built for
 * @type -- ospack flavour the sysroot is derived from
 * @debosarguments -- extra arguments passed through to debos
 */
def buildSysroot(architecture, type, debosarguments = "") {
// 'def' matters here: without it the name was stored in the shared script
// binding and could race between the parallel per-architecture branches.
def sysrootname = "sysroot-${osname}-${release}-${architecture}-${env.PIPELINE_VERSION}"
stage("${architecture} sysroot tarball") {
sh(script: """
mkdir -p sysroot/${release}
cd sysroot/${release}
cp -l ${WORKSPACE}/${PIPELINE_VERSION}/${architecture}/${type}/ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION}.tar.gz .
debos ${debosarguments} \
--show-boot \
-t architecture:${architecture} \
-t ospack:ospack_${release}-${architecture}-${type}_${PIPELINE_VERSION} \
-t sysroot:${sysrootname} \
${WORKSPACE}/${osname}-sysroot.yaml; \
rm ospack*""")
// Generate sysroot metadata (version and download URL) next to the tarball
writeFile file: "sysroot/${release}/sysroot-${osname}-${release}-${architecture}", text: "version=${release} ${PIPELINE_VERSION}\nurl=${sysroot_url_prefix}${release}/${sysrootname}.tar.gz"
}
}
/**
 * Build OSPack for architecture and start a parallel build of artifacts related
 * to target boards.
 *
 * Returns a closure (for parallel()) that, inside the builder container:
 * builds the ospack, then each enabled artifact kind (apt images, OSTree
 * images, LXC container, sysroot), records per-artifact success in
 * buildStatus, uploads whatever succeeded, and for production jobs submits
 * LAVA tests for the successfully built images.
 *
 * @boards -- array with board names
 * @production -- when false, test submission is skipped
 */
def buildImages(architecture, type, boards, debosarguments = "", image = true, sysroot = false, ostree = false, production = false) {
return {
node("docker-slave") {
checkout scm
docker.withRegistry("https://${docker_registry_name}") {
buildenv = docker.image(docker_image_name)
/* Pull explicitly to ensure we have the latest */
buildenv.pull()
buildenv.inside("--device=/dev/kvm") {
stage("setup ${architecture} ${type}") {
// Date-based version shared by all artifacts of this run
env.PIPELINE_VERSION = VersionNumber(versionNumberString: '${BUILD_DATE_FORMATTED,"yyyyMMdd"}.${BUILDS_TODAY_Z}')
sh ("env ; mkdir -p ${PIPELINE_VERSION}/${architecture}/${type}")
}
// Add successfully build artifacts here to know which ones we need to upload and test
// Valid values atm:
// - image-apt-${board} -- for apt-based images
// - image-ostree-${board} -- for ostree-based images
// - lxc-ostree -- for LXC tarball
// - sysroot -- for sysroot tarball
def buildStatus = [:]
// The real work starts here
try {
// If that fails -- do not need to build the rest
buildOSpack(architecture, type, debosarguments)
if (image) {
// Create apt-based images for all boards
for(String board: boards) {
try {
buildImage(architecture, type, board, debosarguments)
buildStatus["image-apt-${board}"] = true
} catch (e) {
// If image build failed -- do not fail other types but do not need to start tests for it
buildStatus["image-apt-${board}"] = false
}
}
}
if (ostree) {
// Create ostree-based images for all boards
for(String board: boards) {
try {
buildOStreeImage(architecture, type, board, debosarguments)
buildStatus["image-ostree-${board}"] = true
} catch (e) {
// If image build failed -- do not fail other types but do not need to start tests for it
buildStatus["image-ostree-${board}"] = false
}
}
/* Create ostree and tarball for container (board name = lxc) */
try {
buildContainer(architecture, type, "lxc", debosarguments)
buildStatus["lxc-ostree"] = true
} catch (e) {
// If image build failed -- do not fail other types but do not need to start tests for it
buildStatus["lxc-ostree"] = false
}
}
if (sysroot) {
// Create sysroot
try {
buildSysroot(architecture, type, debosarguments)
buildStatus["sysroot"] = true
} catch (e) {
// If image build failed -- do not fail other types but do not need to start tests for it
buildStatus["sysroot"] = false
}
}
// Mark the whole pipeline as failed in case of failure at any stage
if (buildStatus.containsValue(false)) {
currentBuild.result = 'FAILURE'
// mark builds where some artifacts have failed to build
dir ("${env.PIPELINE_VERSION}/meta/") {
writeFile (file: "failed-${architecture}-${type}", text: '')
}
}
// Upload artifacts
stage("${architecture} ${type} upload") {
// Push OStree repos first to remove repository prior images upload
for(String board: boards) {
if(buildStatus["image-ostree-${board}"]) {
pushOstreeRepo(architecture, type, board)
}
}
if(buildStatus["lxc-ostree"]) {
pushOstreeRepo(architecture, type, "lxc")
}
// Upload all other artifacts like ospacks and images if any
uploadDirectory (env.PIPELINE_VERSION, "daily/${release}")
if(buildStatus["sysroot"]) {
uploadDirectory ("sysroot/${release}/*", "sysroot/${release}")
}
}
// This stage must be the last in pipeline
// a failure to submit the tests would break the builds
stage("Tests for ${architecture} ${type}") {
if (production) {
for(String board: boards) {
if(buildStatus["image-apt-${board}"]) {
submitTests(architecture, type, board, false)
}
if(buildStatus["image-ostree-${board}"]) {
submitTests(architecture, type, board, true)
}
}
} else {
println "Skipping submitting tests jobs for ${architecture} ${type}"
}
}
} finally {
// Always reclaim workspace, even on failure
stage("Cleanup ${architecture} ${type}") {
deleteDir()
}
}
}
}
}
}
}
/* Schedule the build closures: types without a 'requires' entry run in the
 * first parallel pass; types depending on artifacts from other
 * architectures run in a second parallel pass, after their dependencies
 * have been built and uploaded. */
def first_pass = [:]
def second_pass = [:]
buildCandidates.each { name, arch ->
arch.types.each { type, params ->
/* merge the per-arch default with per-type params */
def merged = [:] << arch << params
if (!params.requires) {
/* first, build all jobs which don’t have any dependencies, in parallel */
first_pass << [("$name $type"):
buildImages(name, type, merged.boards, merged.args, merged.image, merged.sysroot, merged.ostree, production)
]
} else {
/* second, build any jobs which depend on jobs from the first pass, also in parallel */
second_pass << [("$name $type"):
buildImages(name, type, merged.boards, merged.args, merged.image, merged.sysroot, merged.ostree, production)
]
}
}
}
parallel first_pass
parallel second_pass
// Final bookkeeping on any node: record metadata so downstream tooling can
// distinguish complete, partial, skipped and interrupted builds.
node() {
stage("upload meta") {
// Map subtraction leaves the architectures/types excluded by buildOnly
skipped = architectures - buildCandidates
complete = !skipped && currentBuild.resultIsBetterOrEqualTo('SUCCESS')
skippednames = []
for (architecture in skipped) {
for (type in architecture.value.types) {
skippednames << "skipped-${architecture.key}-${type.key}"
}
}
dir ("${env.PIPELINE_VERSION}/meta/") {
writeFile (file: "build-version", text: env.PIPELINE_VERSION)
writeFile (file: "build-url", text: env.BUILD_URL)
// report which entries have been skipped
for (String skippedname : skippednames) {
writeFile (file: skippedname, text: '')
}
// mark builds that have not been killed half way
writeFile (file: "finished", text: '')
// mark successful builds with no skipped artifacts, they're the best candidates for longer term storage
if (complete) {
writeFile (file: "complete", text: '')
}
}
uploadDirectory (env.PIPELINE_VERSION, "daily/${release}")
}
}