extras-buildsys/builder CONFIG.py, NONE, 1.1 Makefile, NONE, 1.1 builder.py, NONE, 1.1
Daniel Williams (dcbw)
fedora-extras-commits at redhat.com
Mon Jun 27 21:29:23 UTC 2005
- Previous message (by thread): extras-buildsys ChangeLog,1.36,1.37 Makefile,1.3,1.4
- Next message (by thread): extras-buildsys/client client.py, NONE, 1.1 Makefile, 1.1, 1.2 CONFIG.py, 1.9, NONE buildclient.py, 1.19, NONE
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
Author: dcbw
Update of /cvs/fedora/extras-buildsys/builder
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv17983/builder
Added Files:
CONFIG.py Makefile builder.py
Log Message:
2005-06-26 Dan Williams <dcbw at redhat.com>
* Move stuff around. The client that package maintainers will use
to submit jobs is now in client/, and the actual build daemon
has moved to builder/
--- NEW FILE CONFIG.py ---
# Configuration file for builder.py (the per-machine build daemon;
# formerly named archwelder.py before the client/builder split)
config_opts = {}

# When True, each job also echoes its log output to stdout
config_opts['debug'] = True

# Command used to perform the actual chroot builds
config_opts['builder_cmd'] = "/usr/bin/mock"

# Distro and Repo:
#
# The build client constructs the buildroot name that it
# passes to mock from a few things.  The format is:
#
#    <distro_name>-<target>-<arch>-<repo_name>
#
# both <target> and <arch> come from the build server.
# This constructed name NEEDS to match a mock config
# file in /etc/mock.
config_opts['distro_name'] = "fedora"
config_opts['repo_name'] = "core"

# Port of the HTTPS file server that the build server pulls
# finished RPMs and logs from
config_opts['fileserver_port'] = 8889
# Port of the XML-RPC control server the build server drives
config_opts['xmlrpc_port'] = 8888

CLIENT_DIR = "/etc/plague/client"

# SSL Certs and keys
# MUST be full path to cert
config_opts['client_cert'] = CLIENT_DIR + "/certs/client_cert.pem"
config_opts['client_key'] = CLIENT_DIR + "/certs/client_key.pem"
config_opts['ca_cert'] = CLIENT_DIR + "/certs/ca_cert.pem"

# Where to keep SRPMs to build and the finished products
# and logs.
# WARNING: this directory is world-readable via HTTP!
config_opts['client_work_dir'] = "/tmp/build_client_work"
--- NEW FILE Makefile ---
# NOTE: PKGNAME is expected to be supplied by the top-level Makefile
# that recurses into this directory.
BINDIR=/usr/bin
ETCDIR=/etc
DESTDIR=''
INSTALL=/usr/bin/install
MKDIR=/bin/mkdir

CONFIGDIR=$(DESTDIR)/$(ETCDIR)/$(PKGNAME)/builder

# clean/install are not files; mark them phony so same-named files
# in this directory can never mask the targets
.PHONY: clean install

clean:
	rm -f *.pyc *.pyo *~ *.bak

install:
	$(MKDIR) -p $(DESTDIR)/$(BINDIR)
	$(INSTALL) -m 755 builder.py $(DESTDIR)/$(BINDIR)/$(PKGNAME)-builder
	$(MKDIR) -p $(CONFIGDIR)
	$(INSTALL) -m 644 CONFIG.py $(CONFIGDIR)/CONFIG.py
--- NEW FILE builder.py ---
#!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2005 Duke University
# written by Seth Vidal
# TODO: xml-rpc communication using 2-way ssl-cert-verified xmlrpc connection
import SimpleXMLRPCServer
import xmlrpclib
import socket
import os
import shutil
import popen2
import sha
import time
import sys
import string
import time
import fcntl
import urllib
import errno
from plague import SimpleHTTPSServer
from plague import FileDownloader
from plague import SimpleSSLXMLRPCServer
# Load in the config
#
# NOTE: the Makefile in this directory installs CONFIG.py into
# $(ETCDIR)/$(PKGNAME)/builder/, so read it from the builder directory.
# The old /etc/plague/client/ path now holds the package submitter's
# client config after the client/builder split.
execfile("/etc/plague/builder/CONFIG.py")

# Hostname or IP this builder binds its servers to; set from argv[1]
# in __main__
g_our_hostname = None

# SSL certificate/key paths handed to the XML-RPC server, the HTTPS
# file server, and the SRPM FileDownloader; all values come from CONFIG.py
certs = {}
certs['cert'] = config_opts['client_cert']
certs['key'] = config_opts['client_key']
certs['ca_cert'] = config_opts['ca_cert']
certs['peer_ca_cert'] = config_opts['ca_cert']
def get_url_for_file(file_path):
    """ Return a URL pointing to a particular file in our work dir,
    or None when file_path does not live under the work dir. """
    # Ensure the file we're turning into a URL lives in our client work dir
    if not file_path.startswith(config_opts["client_work_dir"]):
        return None
    file_part = file_path[len(config_opts["client_work_dir"]) + 1:]
    port = "%s" % config_opts['fileserver_port']
    # Quote only the path component.  Quoting the complete URL would also
    # escape the ':' of "https://" and of the port (quote()'s default safe
    # set is only '/'), yielding an unusable URL.
    return "https://" + g_our_hostname + ":" + port + "/" + urllib.quote(file_part)
class BuildClientMock:
"""puts things together for an arch - baseclass for handling builds for
other arches"""
def __init__(self, uniqid, target, srpm_url):
self._uniqid = uniqid
self._status = 'init'
self._repo_locked = True
self._repo_locked_msg = False
self._files = []
self._pobj = None
self._target = target
self._srpm_url = srpm_url
self._log_fd = None
self._mock_config = None
self._result_dir = os.path.join(config_opts['client_work_dir'], self._uniqid, "result")
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
self._state_dir = os.path.join(config_opts['client_work_dir'], self._uniqid, "mock-state")
if not os.path.exists(self._state_dir):
os.makedirs(self._state_dir)
logfile = os.path.join(self._result_dir, "buildclient.log")
self._log_fd = open(logfile, "w+")
self.log("""Starting job:
Time: %s
Target: %s
UID: %s
Architecture: %s
SRPM: %s\n\n""" % (time.asctime(time.gmtime()), self._target, self._uniqid, self.buildarch, srpm_url))
srpm_filename = FileDownloader.get_base_filename_from_url(srpm_url, ['.src.rpm'])
if not srpm_filename:
self._status = 'failed'
self._srpm_path = None
self.log("Failed in __init__, couldn't extract SRPM filename.\n")
else:
self._srpm_path = os.path.join(config_opts['client_work_dir'], self._uniqid, "source", srpm_filename)
def die(self, sig=15):
if self._pobj and self._pobj.pid: # Can't kill the package download from build server
try:
self.log("Killing build process...\n")
os.kill(self._pobj.pid, sig)
except OSError, e:
self.log("Couldn't kill process %d: %s\n" % (self._pobj.pid, e))
else:
self.log("Killed.\n");
self._status = 'killed'
return True
def log(self, string):
if string and self._log_fd:
self._log_fd.write(string)
self._log_fd.flush()
os.fsync(self._log_fd.fileno())
if config_opts['debug']:
s = "%s: " % self._uniqid
sys.stdout.write(s + string)
sys.stdout.flush()
def start(self):
# check for existence of srpm before going on
self._download_srpm()
def _download_srpm(self):
self._status = 'downloading'
self.log("Starting download of %s.\n" % self._srpm_url)
target_dir = os.path.dirname(self._srpm_path)
dl_thread = FileDownloader.FileDownloader(self.dl_callback, self._srpm_url, self._srpm_url,
target_dir, ['.src.rpm'], certs)
dl_thread.start()
def dl_callback(self, status, cb_data):
url = cb_data
if status == 'done':
self._status = 'downloaded'
self.log("Retrieved %s.\n" % url)
elif status == 'failed':
# Don't overwrite our status with 'failed' if we were cancelled
# and a download error ocurred
if not self.is_done_status():
self._status = 'failed'
self.log("Failed to retrieve %s.\n" % url)
def _build(self):
self.log("Starting step 'building' with command:\n")
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
cmd = '%s %s -r %s --resultdir=%s --statedir=%s --uniqueext=%s %s' % (self.arch_command,
config_opts['builder_cmd'], self.buildroot,
self._result_dir, self._state_dir, self._uniqid, self._srpm_path)
self.log(" %s\n" % cmd)
self._pobj = popen2.Popen4(cmd=cmd, bufsize=1024)
fcntl.fcntl(self._pobj.fromchild.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
self._status = 'prepping'
# Poll a bit to wait for mock to write out the status file if
# its not there yet.
start_time = time.time()
mockstatusfile = os.path.join(self._state_dir, 'status')
while not os.path.exists(mockstatusfile):
time.sleep(0.5)
# if mock exited with an error report that error and not
# the missing status file.
exit_status = self._pobj.poll()
if exit_status > 0:
self._status = 'failed'
break
# Kill mock after 5s if it didn't dump the status file
if time.time() - start_time > 5:
self.log("Timed out waiting for the mock status file! %s\n" % mockstatusfile)
try:
self.log("Killing mock...\n")
os.kill(self._pobj.pid, 15)
except OSError, e:
self.log("Couldn't kill mock process %d: %s\n" % (self._pobj.pid, e))
else:
self.log("Killed.\n")
self._status = 'failed'
break
def _cleanup(self):
self.log("Cleaning up the buildroot...\n")
cmd = '%s %s clean --uniqueext=%s -r %s' % (self.arch_command,
config_opts['builder_cmd'], self._uniqid,
self.buildroot)
self.log(" %s\n" % cmd)
self._pobj = popen2.Popen4(cmd=cmd)
self._status = 'cleanup'
def _mock_is_prepping(self):
mock_status = self._get_mock_status()
if mock_status:
if mock_status == 'prep':
return True
elif mock_status == 'setu':
return True
return False
def _mock_using_repo(self):
mock_status = self._get_mock_status()
if mock_status:
if mock_status == 'init':
return True
elif mock_status == 'clea':
return True
elif mock_status == 'prep':
return True
elif mock_status == 'setu':
return True
return False
def _mock_is_closed(self):
mock_status = self._get_mock_status()
if mock_status and mock_status == "done":
return True
return False
def _get_mock_status(self):
mockstatusfile = os.path.join(self._state_dir, 'status')
if not os.path.exists(mockstatusfile):
return None
f = open(mockstatusfile, "r")
fcntl.fcntl(f.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
while True:
try:
f.seek(0, 0)
string = f.read(4)
except OSError, e:
if e.errno == errno.EAGAIN:
time.sleep(0.25)
continue
else:
if len(string) < 4:
continue
break
f.close()
string = string.lower()
return string
def _read_mock_config(self):
mockconfigfile = os.path.join(self._result_dir, 'mockconfig.log')
if not os.path.exists(mockconfigfile):
return None
f = open(mockconfigfile, "r")
contents = {}
for line in f:
(item, loc) = line.split('=')
item = item.strip()
loc = loc.strip()
contents[item] = loc
f.close()
return contents
def _grab_mock_output(self):
# We don't care about output from the 'cleanup' stage
if self._pobj and self._status != 'cleanup':
# Grab any mock output and write it to a log
string = ' '
while len(string) > 0:
try:
string = os.read(self._pobj.fromchild.fileno(), 1024)
except OSError, e:
if e.errno == errno.EAGAIN: # Resource temporarily unavailable
break
else:
self.log("Error reading mock output: %s\n" % e)
else:
self._log_fd.write(string)
self._log_fd.flush()
os.fsync(self._log_fd.fileno())
def _mock_done(self):
self._files = self._find_files()
self.log("\n\n-----------------------\n\n")
if self._status == 'done':
self.log("Job completed successfully.\n")
elif self._status == 'failed':
exit_status = self._pobj.poll()
self.log("Job failed due to mock errors! mock exit status: %d\n" % exit_status)
elif self._status == 'killed':
self.log("Job failed because it was killed.\n")
if self._log_fd:
self._log_fd.close()
self._log_fd = None
def process(self):
if self.is_done_status():
return
if self._status == 'downloading':
pass
elif self._status == 'downloaded':
# We can't start doing anything with yum until the build
# server tells us the repo is unlocked.
if not self._repo_locked:
self._build()
else:
# Only show this message once
if not self._repo_locked_msg:
self.log("Waiting for repository to unlock before starting the build...\n")
self._repo_locked_msg = True
elif self._status == 'prepping':
if not self._mock_config and self._mock_is_prepping():
self._mock_config = self._read_mock_config()
if not self._mock_using_repo():
self._status = 'building'
elif self._status == 'building':
exit_status = self._pobj.poll()
if exit_status == 0:
# mock completed successfully
if self._status != 'building':
self.log("Bad job end status %s encountered!" % self._status)
self._cleanup()
elif exit_status > 0:
# mock exited with an error
self._status = 'failed'
elif self._status == 'cleanup':
exit_status = self._pobj.poll()
if exit_status >= 0:
# We ignore mock errors when cleaning the buildroot
self._status = 'done'
if self._mock_config.has_key('rootdir'):
shutil.rmtree(self._mock_config['rootdir'], ignore_errors=True)
self._grab_mock_output()
if self.is_done_status():
self._mock_done()
def _find_files(self):
# Grab the list of files in our job's result dir and URL encode them
files_in_dir = os.listdir(self._result_dir)
file_list = []
self.log("\n\nOutput File List:\n-----------------\n")
for f in files_in_dir:
file_url = get_url_for_file(os.path.join(self._result_dir, f))
if file_url:
file_list.append(file_url)
self.log(" Output File: %s\n" % urllib.unquote(file_url))
else:
self.log(" Error: Couldn't get file URL for file %s" % f)
return file_list
def status(self):
return self._status
def files(self):
return self._files
def repo_unlocked(self):
self._repo_locked = False
return 0
def is_done_status(self):
if (self._status is 'done') or (self._status is 'killed') or (self._status is 'failed'):
return True
return False
class i386Arch(BuildClientMock):
    """Build client for 32-bit x86; forces an i686 personality with
    setarch so mock produces 32-bit packages on x86_64 hosts too."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = '/usr/bin/setarch i686'
        self.buildroot = '%s-%s-i386-%s' % (config_opts['distro_name'],
                                            target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
class x86_64Arch(BuildClientMock):
    """Build client for x86_64; runs mock natively (no arch wrapper)."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = ''
        self.buildroot = '%s-%s-x86_64-%s' % (config_opts['distro_name'],
                                              target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
class PPCArch(BuildClientMock):
    """Build client for 32-bit PowerPC; runs mock natively."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = ''
        self.buildroot = '%s-%s-ppc-%s' % (config_opts['distro_name'],
                                           target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
class PPC64Arch(BuildClientMock):
    """Build client for 64-bit PowerPC; runs mock natively."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = ''
        self.buildroot = '%s-%s-ppc64-%s' % (config_opts['distro_name'],
                                             target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
class SparcArch(BuildClientMock):
    """Build client for 32-bit SPARC; wraps mock with sparc32."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = '/usr/bin/sparc32'
        self.buildroot = '%s-%s-sparc-%s' % (config_opts['distro_name'],
                                             target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
class Sparc64Arch(BuildClientMock):
    """Build client for 64-bit SPARC; wraps mock with sparc64."""
    def __init__(self, uniqid, target, buildarch, srpm_url):
        self.buildarch = buildarch
        self.arch_command = '/usr/bin/sparc64'
        self.buildroot = '%s-%s-sparc64-%s' % (config_opts['distro_name'],
                                               target, config_opts['repo_name'])
        BuildClientMock.__init__(self, uniqid, target, srpm_url)
# Keep this global scope, used in __main__
# Maps every supported build-architecture string to the arch class that
# knows how to build for it.
builder_dict = dict(
    [(a, i386Arch)    for a in ('i386', 'i486', 'i586', 'i686', 'athlon')] +
    [(a, x86_64Arch)  for a in ('x86_64', 'amd64', 'ia32e')] +
    [(a, PPCArch)     for a in ('ppc', 'ppc32')] +
    [(a, PPC64Arch)   for a in ('ppc64',)] +
    [(a, SparcArch)   for a in ('sparc', 'sparcv8', 'sparcv9')] +
    [(a, Sparc64Arch) for a in ('sparc64',)])
def getBuildClient(uniqid, target, buildarch, srpm_url, localarches):
    """hand it an arch it hands you back the build client instance you need,
    or None if this machine can't build for 'buildarch'.

    Callers (XMLRPCBuildClientServer.start) test the result against None;
    the previous version left 'builder' unbound for unsupported arches and
    raised a NameError instead.
    """
    if buildarch == 'noarch':
        # noarch packages can be built by any arch we support locally;
        # just use the first arch this builder was started with
        if len(localarches) > 0:
            builder = builder_dict.get(localarches[0])
        else:
            builder = None
    elif buildarch in localarches:
        builder = builder_dict.get(buildarch)
    else:
        builder = None
    if builder is None:
        return None
    return builder(uniqid, target, buildarch, srpm_url)
def log(string):
if config_opts['debug']:
print string
class XMLRPCBuildClientServer:
    """XML-RPC facade the build server drives: start jobs, poll status,
    fetch result-file lists, unlock the repo, and kill jobs.
    """

    def __init__(self, localarches):
        self.ids = {}            # unique id => BuildClientMock instance
        self.localarches = localarches
        self.cur_job = 0         # uniqid of the running job, 0 when idle

    def process(self):
        # Give jobs some time to update their status and do their thing
        job = 0
        for (uniqid, bcp) in self.ids.iteritems():
            if not bcp.is_done_status():
                bcp.process()
                job = uniqid
        self.cur_job = job  # Update current job

    def start(self, target, buildarch, srpm_url):
        """Start a job for srpm_url; return its unique id, or 0 on refusal.

        Only one job may run at a time.
        """
        if self.cur_job != 0:
            log("Tried to build '%s' when already building something" % srpm_url)
            return 0
        cur_time = time.time()
        # Job ids are the SHA1 of the start time plus the job parameters
        check = '%d %s %s %s' % (cur_time, target, buildarch, srpm_url)
        digest = sha.new()
        digest.update(check)
        uniqid = digest.hexdigest()
        # 'devel' is an alias the server may send for the development target
        if target == 'devel':
            target = 'development'
        bcp = getBuildClient(uniqid, target, buildarch, srpm_url, self.localarches)
        if bcp != None:
            self.ids[uniqid] = bcp
            bcp.start()
            filename = os.path.basename(srpm_url)
            log("%s: started %s on %s arch %s at time %d" % (uniqid, filename,
                    target, buildarch, cur_time))
        else:
            log("%s: Failed request for %s on %s UNSUPPORTED arch %s at time %d" %
                    (uniqid, srpm_url, target, buildarch, cur_time))
            uniqid = 0
        self.cur_job = uniqid
        return uniqid

    def status(self, uniqid=None):
        """Return the status string of a job (defaults to the current job),
        or 'idle' when there is no such job.
        """
        if not uniqid:
            uniqid = self.cur_job
        if not uniqid:
            return 'idle'
        bcp = self.ids.get(uniqid)
        if not bcp:
            return 'idle'
        return bcp.status()

    def die(self, uniqid):
        """Kill the given job."""
        bcp = self.ids[uniqid]
        return bcp.die()

    def files(self, uniqid):
        """Return the list of result-file URLs for the given job."""
        bcp = self.ids[uniqid]
        return bcp.files()

    def repo_unlocked(self, uniqid):
        """Server notification that the repo is unlocked for this job."""
        bcp = self.ids[uniqid]
        return bcp.repo_unlocked()

    def listjobs(self):
        return self.ids.keys()

    def get_cur_job(self):
        """ Are we currently building something? """
        return self.cur_job

    def supported_arches(self):
        return self.localarches
if __name__ == '__main__':
if len(sys.argv) < 3:
print "Usage:\n"
print " %s <hostname> <archlist>\n" % sys.argv[0]
print " <hostname> - hostname or IP address of this machine"
print " <archlist> - space-separated list of arches this machine can build"
# pretty-print the available archlist
archlist = ""
avail_arches = builder_dict.keys()
avail_arches.sort()
for a in avail_arches:
archlist = archlist + a
if a != avail_arches[len(avail_arches)-1]:
archlist = archlist + ", "
print " Available arches: [ %s ]\n" % archlist
sys.exit(1)
g_our_hostname = sys.argv[1]
localarches = sys.argv[2:]
print "Binding to address '%s' with arches: [%s]" % (g_our_hostname, string.join(localarches))
xmlrpc_port = config_opts['xmlrpc_port']
xmlserver = SimpleSSLXMLRPCServer.SimpleSSLXMLRPCServer(certs, (g_our_hostname, xmlrpc_port))
bcs = XMLRPCBuildClientServer(localarches)
xmlserver.register_instance(bcs)
# Start up the HTTP server thread which the build server
# pulls completed RPMs from
work_dir = config_opts['client_work_dir']
fs_port = config_opts['fileserver_port']
http_server = SimpleHTTPSServer.SimpleHTTPSServer(certs, (g_our_hostname, fs_port), work_dir)
http_server.start()
last_time = time.time()
while True:
try:
xmlserver.handle_request()
except KeyboardInterrupt, e:
print "Shutting down..."
break
except socket.error, e:
if e[0] == 11: # Resource temporarily unavailable
try:
time.sleep(0.1)
except KeyboardInterrupt, e:
print "Shutting down..."
break
cur_time = time.time()
if cur_time >= last_time + 3:
# do some work every 3s or so
bcs.process()
last_time = time.time()
os._exit(0)
- Previous message (by thread): extras-buildsys ChangeLog,1.36,1.37 Makefile,1.3,1.4
- Next message (by thread): extras-buildsys/client client.py, NONE, 1.1 Makefile, 1.1, 1.2 CONFIG.py, 1.9, NONE buildclient.py, 1.19, NONE
- Messages sorted by:
[ date ]
[ thread ]
[ subject ]
[ author ]
More information about the fedora-extras-commits
mailing list