diff --git a/dsl/staticweb_jobs.groovy b/dsl/staticweb_jobs.groovy
index 59465b1..ea8f81d 100644
--- a/dsl/staticweb_jobs.groovy
+++ b/dsl/staticweb_jobs.groovy
@@ -1,63 +1,66 @@
 // Read the contents of the gathered-jobs.json file that a previous step created for us
 def jobsToParse = readFileFromWorkspace('staticweb/gathered-jobs.json')
 def knownJobs = new groovy.json.JsonSlurper().parseText( jobsToParse )
 
 // Iterate over all of the known jobs and create them!
 knownJobs.each {
     // Save our job name for later
     def jobName = "Website_${it.name}"
     // Likewise with our cron schedule
     def cronSchedule = "${it.cron}"
 
     // Read in the necessary Pipeline script
     def pipelineTemplate = readFileFromWorkspace("staticweb/pipeline-templates/${it.type}.pipeline")
 
     // Now we can construct our Pipeline script
     // We append a series of variables to the top of it to provide a variety of useful information to the otherwise templated script
     // These appended variables are what make one build different from the next, aside from the template which was used
     def pipelineScript = """
         |def name = "${it.name}"
         |def deploypath = "${it.deploypath}"
+        |def deployhost = "${it.deployserver.hostname}"
+        |def deployuser = "${it.deployserver.username}"
+        |def deploykey = "${it.deployserver.sshkey}"
         |def repositoryUrl = "${it.repositoryUrl}"
         |def gitBranch = "${it.branch}"
         |${pipelineTemplate}""".stripMargin()
 
     // Actually create the job now
     pipelineJob( jobName ) {
         properties {
             // We don't want to keep build results forever
             // We'll set it to keep the last 5 builds and discard everything else
             buildDiscarder {
                 strategy {
                     logRotator {
                         numToKeepStr("5")
                         daysToKeepStr('')
                         artifactDaysToKeepStr('')
                         artifactNumToKeepStr('')
                     }
                 }
             }
             // We don't want to be building the same project more than once
             // This is to prevent one project hogging resources
             // And also has a practical component as otherwise an older build could finish afterwards and upload old build results
             disableConcurrentBuilds()
         }
         triggers {
             // We want to enable SCM Polling so that git.kde.org can tell Jenkins to look for changes
             // At the same time, we don't want Jenkins scanning for changes, so set the Polling specification to be empty so nothing gets scheduled
             pollSCM {
                 scmpoll_spec('')
                 ignorePostCommitHooks(false)
             }
             // We want to automatically rebuild on the schedule we were given
             cron( cronSchedule )
         }
         // This is where the Pipeline script actually happens :)
         definition {
             cps {
                 script( pipelineScript )
                 sandbox()
             }
         }
     }
 }
diff --git a/staticweb/gather-jobs.py b/staticweb/gather-jobs.py
index 7989870..2b32795 100644
--- a/staticweb/gather-jobs.py
+++ b/staticweb/gather-jobs.py
@@ -1,101 +1,130 @@
 #!/usr/bin/python3
 import os
 import sys
 import json
 import yaml
 import argparse
 
 # Parse the command line arguments we've been given
 parser = argparse.ArgumentParser(description='Utility to determine which jobs need to be registered in Jenkins.')
 parser.add_argument('--static-jobs', type=str, required=True, dest='staticJobs')
 parser.add_argument('--custom-jobs', type=str, required=True, dest='customJobs')
 arguments = parser.parse_args()
 
+# List of systems we can upload to
+possibleServers = {
+    'nicoda': {
+        'username': 'sitedeployer',
+        'hostname': 'nicoda.kde.org',
+        'sshkey' : '$HOME/WebsitePublishing/website-upload.key'
+    },
+    'cdn': {
+        'username': 'contentdeployer',
+        'hostname': 'milonia.kde.org',
+        'sshkey' : '$HOME/WebsitePublishing/cdn-upload.key'
+    },
+    'edulis': {
+        'username': 'sitedeployer',
+        'hostname': 'edulis.kde.org',
+        'sshkey' : '$HOME/WebsitePublishing/edulis-upload.key'
+    },
+}
+
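+# For reference, an entry in the static jobs YAML file is expected to sit under the name of
+# the pipeline template it should be built with, roughly like this (illustrative names and
+# values only; the 'server' key is optional and falls back to 'nicoda' below):
+#
+#   some-pipeline-template:
+#     - domain: 'example.kde.org'
+#       repository: 'websites/example-kde-org'
+#       server: 'cdn'
+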
 # Grab the list of conventional, static jobs in order to commence processing it
 with open(arguments.staticJobs, 'r') as dataFile:
     # Parse the YAML file
     jobsToCreate = yaml.load( dataFile )
 
 # Our output will be a list of Dictionaries, containing several keys:
 # 1) The name of the job
 # 2) The repository (or Subversion path) to be checked out
 # 3) The branch of the repository to be checked out (in the case of Subversion, this will be blank)
 # 4) The schedule on which the job should be rebuilt
 jobsGathered = []
 
 # Let's get started processing conventional jobs!
 for jobPipelineTemplate in jobsToCreate.keys():
     # We now go over each website that uses this particular pipeline template
     for website in jobsToCreate[ jobPipelineTemplate ]:
         # Construct the basic empty template for the job that will publish this website
         jobEntry = {
             'name': '',
             'repositoryUrl': '',
             'branch': 'master',
             'cron': '',
             'type': jobPipelineTemplate,
-            'deploypath': ''
+            'deploypath': '',
+            'deployserver': possibleServers['nicoda'],
         }
 
         # For the job entry we will need a name
         # To create this we take the domain this website is published at, and swap all dots with dashes
         jobEntry['name'] = website['domain'].replace('.', '-')
 
         # Does this website use Git for its repository?
         if 'repository' in website:
             jobEntry['repositoryUrl'] = 'https://anongit.kde.org/' + website['repository']
 
         # Otherwise could it be using SVN for its repository?
         if 'svnpath' in website:
             jobEntry['repositoryUrl'] = 'svn://svn.kde.org/home/kde/' + website['svnpath']
 
+        # Have we got a specified server to deploy this to?
+        if 'server' in website:
+            jobEntry['deployserver'] = possibleServers.get( website['server'], possibleServers['nicoda'] )
+
         # Along with the path it should be deployed to on the server
         jobEntry['deploypath'] = '/srv/www/generated/{0}/'.format( website['domain'] )
 
         # Finally we can add it to the list
         jobsGathered.append( jobEntry )
 
 # Now we have to process custom jobs
 # This is essentially done to calculate the Subversion/Git repository URL for them
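+# Each custom job entry in that JSON file is expected to carry the fields copied across
+# below, for example (illustrative values only; 'server' is again optional):
+#
+#   {
+#     "name": "example-custom-job",
+#     "branch": "master",
+#     "cron": "@daily",
+#     "type": "some-pipeline-template",
+#     "deploypath": "/srv/www/generated/example.kde.org/",
+#     "repository": "websites/example-kde-org",
+#     "server": "cdn"
+#   }
+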
 with open(arguments.customJobs, 'r') as dataFile:
     # Load the list of custom jobs in
     jobsToCreate = json.load(dataFile)
 
 # Process the custom jobs...
 for customJob in jobsToCreate:
     # Construct the standard empty template for this job which we'll then populate
     jobEntry = {
         'name': '',
         'repositoryUrl': '',
         'branch': '',
         'cron': '',
         'type': '',
-        'deploypath': ''
+        'deploypath': '',
+        'deployserver': possibleServers['nicoda'],
     }
 
     # Transfer across most of the details we can
     jobEntry['name'] = customJob['name']
     jobEntry['branch'] = customJob['branch']
     jobEntry['cron'] = customJob['cron']
     jobEntry['type'] = customJob['type']
     jobEntry['deploypath'] = customJob['deploypath']
 
     # Does this website use Git for its repository?
     if 'repository' in customJob:
         jobEntry['repositoryUrl'] = 'https://anongit.kde.org/' + customJob['repository']
 
     # Otherwise could it be using SVN for its repository?
     if 'svnpath' in customJob:
         jobEntry['repositoryUrl'] = 'svn://svn.kde.org/home/kde/' + customJob['svnpath']
 
+    # Have we got a specified server to deploy this to?
+    if 'server' in customJob:
+        jobEntry['deployserver'] = possibleServers.get( customJob['server'], possibleServers['nicoda'] )
+
     # Finally we can add it to the list
     jobsGathered.append( jobEntry )
 
 # Now output the jobs we've gathered in JSON to disk
 # This will subsequently be read in by a Jenkins DSL script and turned into Jenkins Jobs
 filePath = os.path.join( os.getcwd(), 'gathered-jobs.json')
 with open(filePath, 'w') as jobsFile:
     json.dump( jobsGathered, jobsFile, sort_keys=True, indent=2 )
 
 # All done!
 sys.exit(0)
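
For reference, with both changes in place a single entry in the list written to gathered-jobs.json, and later read by the Job DSL script above, should look roughly like this (illustrative values, keys sorted by json.dump, the deployserver block taken from possibleServers):

  {
    "branch": "master",
    "cron": "",
    "deploypath": "/srv/www/generated/example.kde.org/",
    "deployserver": {
      "hostname": "milonia.kde.org",
      "sshkey": "$HOME/WebsitePublishing/cdn-upload.key",
      "username": "contentdeployer"
    },
    "name": "example-kde-org",
    "repositoryUrl": "https://anongit.kde.org/websites/example-kde-org",
    "type": "some-pipeline-template"
  }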