Package dak :: Module generate_releases

Source Code for Module dak.generate_releases

#! /usr/bin/env python3

"""
Create all the Release files

@contact: Debian FTPMaster <ftpmaster@debian.org>
@copyright: 2011  Joerg Jaspert <joerg@debian.org>
@copyright: 2011  Mark Hymers <mhy@debian.org>
@license: GNU General Public License version 2 or later

"""

# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.

# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA

################################################################################

# <mhy> I wish they wouldnt leave biscuits out, thats just tempting. Damnit.

################################################################################

import sys
import os
import os.path
import time
import gzip
import bz2
import errno
import apt_pkg
import subprocess
from sqlalchemy.orm import object_session

import daklib.gpg
from daklib import utils, daklog
from daklib.regexes import re_gensubrelease, re_includeinrelease_byhash, re_includeinrelease_plain
from daklib.dbconn import *
from daklib.config import Config
from daklib.dakmultiprocessing import DakProcessPool, PROC_STATUS_SUCCESS

################################################################################

Logger = None                  #: Our logging object

################################################################################


def usage(exit_code=0):
    """ Usage information"""

    print("""Usage: dak generate-releases [OPTIONS]
Generate the Release files

  -a, --archive=ARCHIVE      process suites in ARCHIVE
  -s, --suite=SUITE(s)       process this suite
                             Default: All suites not marked 'untouchable'
  -f, --force                Allow processing of untouchable suites
                             CAREFUL: Only to be used at (point) release time!
  -h, --help                 show this help and exit
  -q, --quiet                Don't output progress

SUITE can be a space separated list, e.g.
   --suite=unstable testing
""")
    sys.exit(exit_code)

########################################################################


def sign_release_dir(suite, dirname):
    cnf = Config()

    if 'Dinstall::SigningKeyring' in cnf or 'Dinstall::SigningHomedir' in cnf:
        args = {
            'keyids': suite.signingkeys or [],
            'pubring': cnf.get('Dinstall::SigningPubKeyring') or None,
            'secring': cnf.get('Dinstall::SigningKeyring') or None,
            'homedir': cnf.get('Dinstall::SigningHomedir') or None,
            'passphrase_file': cnf.get('Dinstall::SigningPassphraseFile') or None,
        }

        relname = os.path.join(dirname, 'Release')

        # Release.gpg is the detached signature, InRelease the clearsigned
        # variant; remove stale copies before re-signing.
        dest = os.path.join(dirname, 'Release.gpg')
        if os.path.exists(dest):
            os.unlink(dest)

        inlinedest = os.path.join(dirname, 'InRelease')
        if os.path.exists(inlinedest):
            os.unlink(inlinedest)

        with open(relname, 'r') as stdin:
            with open(dest, 'w') as stdout:
                daklib.gpg.sign(stdin, stdout, inline=False, **args)
            stdin.seek(0)
            with open(inlinedest, 'w') as stdout:
                daklib.gpg.sign(stdin, stdout, inline=True, **args)


class XzFile:
    def __init__(self, filename, mode='r'):
        self.filename = filename

    def read(self):
        with open(self.filename, 'rb') as stdin:
            return subprocess.check_output(['xz', '-d'], stdin=stdin)


class ZstdFile:
    def __init__(self, filename, mode='r'):
        self.filename = filename

    def read(self):
        with open(self.filename, 'rb') as stdin:
            return subprocess.check_output(['zstd', '--decompress'], stdin=stdin)


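# XzFile and ZstdFile above are minimal wrappers: they shell out to the
# external decompressor and expose only the read() method that
# generate_release_files() uses. For xz, the standard-library lzma module
# would be an in-process equivalent -- a sketch, not what dak actually does:
#
#   import lzma
#
#   class LzmaFile:
#       def __init__(self, filename, mode='r'):
#           self.filename = filename
#
#       def read(self):
#           # Return the decompressed contents as bytes.
#           with lzma.open(self.filename, 'rb') as f:
#               return f.read()

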
class HashFunc:
    def __init__(self, release_field, func, db_name):
        self.release_field = release_field
        self.func = func
        self.db_name = db_name


RELEASE_HASHES = [
    HashFunc('MD5Sum', apt_pkg.md5sum, 'md5sum'),
    HashFunc('SHA1', apt_pkg.sha1sum, 'sha1'),
    HashFunc('SHA256', apt_pkg.sha256sum, 'sha256'),
]


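# Each HashFunc ties a Release-file section name to the apt_pkg digest helper
# that computes it and to the name used in the suite's "checksums" database
# setting. The helpers take bytes and return a hex digest string, e.g.:
#
#   >>> apt_pkg.md5sum(b"test")
#   '098f6bcd4621d373cade4e832627b4f6'
#   >>> apt_pkg.sha256sum(b"test")
#   '9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08'

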
class ReleaseWriter:
    def __init__(self, suite):
        self.suite = suite

    def suite_path(self):
        """
        Absolute path to the suite-specific files.
        """
        suite_suffix = utils.suite_suffix(self.suite.suite_name)

        return os.path.join(self.suite.archive.path, 'dists',
                            self.suite.suite_name, suite_suffix)

    def suite_release_path(self):
        """
        Absolute path where Release files are physically stored.
        This should be a path that sorts after the dists/ directory.
        """
        cnf = Config()
        suite_suffix = utils.suite_suffix(self.suite.suite_name)

        return os.path.join(self.suite.archive.path, 'zzz-dists',
                            self.suite.suite_name, suite_suffix)

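    # The listing elides create_release_symlinks() here. A minimal sketch of
    # what it has to do, inferred from suite_release_path() above and the call
    # in generate_release_files() below -- not the verbatim dak code:
    def create_release_symlinks(self):
        """
        Symlink Release, Release.gpg and InRelease from dists/ to the
        physical zzz-dists/ location.
        """
        relpath = os.path.relpath(self.suite_release_path(), self.suite_path())
        for f in ("Release", "Release.gpg", "InRelease"):
            source = os.path.join(relpath, f)
            dest = os.path.join(self.suite_path(), f)
            if os.path.lexists(dest) and (not os.path.islink(dest)
                                          or os.readlink(dest) != source):
                os.unlink(dest)
            if not os.path.lexists(dest):
                os.symlink(source, dest)
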
    def create_output_directories(self):
        for path in (self.suite_path(), self.suite_release_path()):
            try:
                os.makedirs(path)
            except OSError as e:
                if e.errno != errno.EEXIST:
                    raise

    def _update_hashfile_table(self, session, fileinfo, hashes):
        # Mark all by-hash files as obsolete. We will undo that for the ones
        # we still reference later.
        query = """
            UPDATE hashfile SET unreferenced = CURRENT_TIMESTAMP
            WHERE suite_id = :id AND unreferenced IS NULL"""
        session.execute(query, {'id': self.suite.suite_id})

        query = "SELECT path FROM hashfile WHERE suite_id = :id"
        q = session.execute(query, {'id': self.suite.suite_id})
        known_hashfiles = set(row[0] for row in q)
        updated = set()
        new = set()

        # Update the hashfile table with new or updated files
        for filename in fileinfo:
            if not os.path.lexists(filename):
                # probably an uncompressed index we didn't generate
                continue
            byhashdir = os.path.join(os.path.dirname(filename), 'by-hash')
            for h in hashes:
                field = h.release_field
                hashfile = os.path.join(byhashdir, field, fileinfo[filename][field])
                if hashfile in known_hashfiles:
                    updated.add(hashfile)
                else:
                    new.add(hashfile)

        if updated:
            session.execute("""
                UPDATE hashfile SET unreferenced = NULL
                WHERE path = ANY(:p) AND suite_id = :id""",
                {'p': list(updated), 'id': self.suite.suite_id})
        if new:
            session.execute("""
                INSERT INTO hashfile (path, suite_id)
                VALUES (:p, :id)""",
                [{'p': hashfile, 'id': self.suite.suite_id} for hashfile in new])

        session.commit()
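
    # The hashfile table mirrors the on-disk by-hash tree: every index file is
    # also published under .../by-hash/<Field>/<digest> so clients can fetch a
    # consistent snapshot while the plain names change underneath them. An
    # illustrative path (digest shortened):
    #
    #   main/binary-amd64/by-hash/SHA256/9f86d081884c7d65...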
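
    # The listing also elides _make_byhash_links() and
    # _make_byhash_base_symlink(). Minimal sketches of the two helpers,
    # inferred from their call sites in generate_release_files() and from
    # _update_hashfile_table() above -- not the verbatim dak code:

    def _make_byhash_links(self, fileinfo, hashes):
        # Hard-link every index file under by-hash/<Field>/<digest>.
        for filename in fileinfo:
            if not os.path.lexists(filename):
                # probably an uncompressed index we didn't generate
                continue
            for h in hashes:
                hashfile = os.path.join(os.path.dirname(filename), 'by-hash',
                                        h.release_field,
                                        fileinfo[filename][h.release_field])
                try:
                    os.makedirs(os.path.dirname(hashfile))
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise
                try:
                    os.link(filename, hashfile)
                except OSError as e:
                    if e.errno != errno.EEXIST:
                        raise

    def _make_byhash_base_symlink(self, fileinfo, hashes):
        # Replace each plain index file with a symlink to its by-hash copy
        # under the strongest configured hash.
        for filename in fileinfo:
            if not os.path.lexists(filename):
                continue
            besthash = hashes[-1]
            hashfilebase = os.path.join('by-hash', besthash.release_field,
                                        fileinfo[filename][besthash.release_field])
            assert os.path.exists(os.path.join(os.path.dirname(filename),
                                               hashfilebase))
            os.unlink(filename)
            os.symlink(hashfilebase, filename)
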
    def generate_release_files(self):
        """
        Generate the Release files for this writer's suite (C{self.suite}).
        """

        suite = self.suite
        session = object_session(suite)

        # Attribs contains a tuple of field names and the database names to use to
        # fill them in
        attribs = (('Origin', 'origin'),
                   ('Label', 'label'),
                   ('Suite', 'release_suite_output'),
                   ('Version', 'version'),
                   ('Codename', 'codename'),
                   ('Changelogs', 'changelog_url'),
                   )

        # A "Sub" Release file has slightly different fields
        subattribs = (('Archive', 'suite_name'),
                      ('Origin', 'origin'),
                      ('Label', 'label'),
                      ('Version', 'version'))

        # Boolean stuff. If we find it true in database, write out "yes" into the release file
        boolattrs = (('NotAutomatic', 'notautomatic'),
                     ('ButAutomaticUpgrades', 'butautomaticupgrades'),
                     ('Acquire-By-Hash', 'byhash'),
                     )

        cnf = Config()
        cnf_suite_suffix = cnf.get("Dinstall::SuiteSuffix", "").rstrip("/")

        suite_suffix = utils.suite_suffix(suite.suite_name)

        self.create_output_directories()
        self.create_release_symlinks()

        outfile = os.path.join(self.suite_release_path(), "Release")
        out = open(outfile + ".new", "w")

        for key, dbfield in attribs:
            # Hack to skip NULL Version fields as we used to do this
            # We should probably just always ignore anything which is None
            if key in ("Version", "Changelogs") and getattr(suite, dbfield) is None:
                continue

            out.write("%s: %s\n" % (key, getattr(suite, dbfield)))

        out.write("Date: %s\n" % (time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime(time.time()))))

        if suite.validtime:
            validtime = float(suite.validtime)
            out.write("Valid-Until: %s\n" % (time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime(time.time() + validtime))))

        for key, dbfield in boolattrs:
            if getattr(suite, dbfield, False):
                out.write("%s: yes\n" % (key))

        skip_arch_all = True
        if suite.separate_contents_architecture_all or suite.separate_packages_architecture_all:
            # According to the Repository format specification:
            # https://wiki.debian.org/DebianRepository/Format#No-Support-for-Architecture-all
            #
            # Clients are not expected to support Packages-all without Contents-all. At the
            # time of writing, it is not possible to set separate_packages_architecture_all.
            # However, we add this little assert to stop the bug early.
            #
            # If you are here because the assert failed, you probably want to see "update123.py"
            # and its advice on updating the CHECK constraint.
            assert suite.separate_contents_architecture_all
            skip_arch_all = False

            if not suite.separate_packages_architecture_all:
                out.write("No-Support-for-Architecture-all: Packages\n")

        architectures = get_suite_architectures(suite.suite_name, skipall=skip_arch_all, skipsrc=True, session=session)

        out.write("Architectures: %s\n" % (" ".join(a.arch_string for a in architectures)))

        components = [c.component_name for c in suite.components]

        out.write("Components: %s\n" % (" ".join(components)))

        # For exact compatibility with old g-r, write out Description here instead
        # of with the rest of the DB fields above
        if getattr(suite, 'description') is not None:
            out.write("Description: %s\n" % suite.description)

        for comp in components:
            for dirpath, dirnames, filenames in os.walk(os.path.join(self.suite_path(), comp), topdown=True):
                if not re_gensubrelease.match(dirpath):
                    continue

                subfile = os.path.join(dirpath, "Release")
                subrel = open(subfile + '.new', "w")

                for key, dbfield in subattribs:
                    if getattr(suite, dbfield) is not None:
                        subrel.write("%s: %s\n" % (key, getattr(suite, dbfield)))

                for key, dbfield in boolattrs:
                    if getattr(suite, dbfield, False):
                        subrel.write("%s: yes\n" % (key))

                subrel.write("Component: %s%s\n" % (suite_suffix, comp))

                # Urgh, but until we have all the suite/component/arch stuff in the DB,
                # this'll have to do
                arch = os.path.split(dirpath)[-1]
                if arch.startswith('binary-'):
                    arch = arch[7:]

                subrel.write("Architecture: %s\n" % (arch))
                subrel.close()

                os.rename(subfile + '.new', subfile)

        # Now that we have done the groundwork, we want to get off and add the files with
        # their checksums to the main Release file
        oldcwd = os.getcwd()

        os.chdir(self.suite_path())

        hashes = [x for x in RELEASE_HASHES if x.db_name in suite.checksums]

        fileinfo = {}
        fileinfo_byhash = {}

        uncompnotseen = {}

        for dirpath, dirnames, filenames in os.walk(".", followlinks=True, topdown=True):
            # SuiteSuffix deprecation:
            # components on security-master are updates/{main,contrib,non-free}, but
            # we want dists/${suite}/main. Until we can rename the components,
            # we cheat by having an updates -> . symlink. This should not be visited.
            if cnf_suite_suffix:
                path = os.path.join(dirpath, cnf_suite_suffix)
                try:
                    target = os.readlink(path)
                    if target == ".":
                        dirnames.remove(cnf_suite_suffix)
                except (OSError, ValueError):
                    pass
            for entry in filenames:
                if dirpath == '.' and entry in ["Release", "Release.gpg", "InRelease"]:
                    continue

                filename = os.path.join(dirpath.lstrip('./'), entry)

                if re_includeinrelease_byhash.match(entry):
                    fileinfo[filename] = fileinfo_byhash[filename] = {}
                elif re_includeinrelease_plain.match(entry):
                    fileinfo[filename] = {}
                # Skip things we don't want to include
                else:
                    continue

                with open(filename, 'rb') as fd:
                    contents = fd.read()

                # If we find a file for which we have a compressed version and
                # haven't yet seen the uncompressed one, store the possibility
                # for future use
                if entry.endswith(".gz") and filename[:-3] not in uncompnotseen:
                    uncompnotseen[filename[:-3]] = (gzip.GzipFile, filename)
                elif entry.endswith(".bz2") and filename[:-4] not in uncompnotseen:
                    uncompnotseen[filename[:-4]] = (bz2.BZ2File, filename)
                elif entry.endswith(".xz") and filename[:-3] not in uncompnotseen:
                    uncompnotseen[filename[:-3]] = (XzFile, filename)
                elif entry.endswith(".zst") and filename[:-4] not in uncompnotseen:
                    uncompnotseen[filename[:-4]] = (ZstdFile, filename)

                fileinfo[filename]['len'] = len(contents)

                for hf in hashes:
                    fileinfo[filename][hf.release_field] = hf.func(contents)

        for filename, comp in uncompnotseen.items():
            # If we've already seen the uncompressed file, we don't
            # need to do anything again
            if filename in fileinfo:
                continue

            fileinfo[filename] = {}

            # File handler is comp[0], filename of compressed file is comp[1]
            contents = comp[0](comp[1], 'r').read()

            fileinfo[filename]['len'] = len(contents)

            for hf in hashes:
                fileinfo[filename][hf.release_field] = hf.func(contents)

        for field in sorted(h.release_field for h in hashes):
            out.write('%s:\n' % field)
            for filename in sorted(fileinfo.keys()):
                out.write(" %s %8d %s\n" % (fileinfo[filename][field], fileinfo[filename]['len'], filename))

        out.close()
        os.rename(outfile + '.new', outfile)

        self._update_hashfile_table(session, fileinfo_byhash, hashes)
        self._make_byhash_links(fileinfo_byhash, hashes)
        self._make_byhash_base_symlink(fileinfo_byhash, hashes)

        sign_release_dir(suite, os.path.dirname(outfile))

        os.chdir(oldcwd)

        return


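# For reference, an abbreviated and purely illustrative top-level Release file
# as generate_release_files() writes it (all values are examples only):
#
#   Origin: Debian
#   Label: Debian
#   Suite: unstable
#   Codename: sid
#   Date: Sat, 01 Jan 2022 00:00:00 UTC
#   Acquire-By-Hash: yes
#   Architectures: all amd64 arm64
#   Components: main contrib non-free
#   MD5Sum:
#    098f6bcd4621d373cade4e832627b4f6     1234 main/binary-amd64/Packages.gz
#   SHA256:
#    9f86d081884c7d659a2feaa0c55ad015a3bf4f1b2b0b822cd15d6c15b0f00a08     1234 main/binary-amd64/Packages.gz

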
def main():
    global Logger

    cnf = Config()

    for i in ["Help", "Suite", "Force", "Quiet"]:
        key = "Generate-Releases::Options::%s" % i
        if key not in cnf:
            cnf[key] = ""

    Arguments = [('h', "help", "Generate-Releases::Options::Help"),
                 ('a', 'archive', 'Generate-Releases::Options::Archive', 'HasArg'),
                 ('s', "suite", "Generate-Releases::Options::Suite"),
                 ('f', "force", "Generate-Releases::Options::Force"),
                 ('q', "quiet", "Generate-Releases::Options::Quiet"),
                 ('o', 'option', '', 'ArbItem')]

    suite_names = apt_pkg.parse_commandline(cnf.Cnf, Arguments, sys.argv)
    Options = cnf.subtree("Generate-Releases::Options")

    if Options["Help"]:
        usage()

    Logger = daklog.Logger('generate-releases')

    # Setup a multiprocessing Pool. As many workers as we have CPU cores.
    pool = DakProcessPool()

    session = DBConn().session()

    if Options["Suite"]:
        suites = []
        for s in suite_names:
            suite = get_suite(s.lower(), session)
            if suite:
                suites.append(suite)
            else:
                print("cannot find suite %s" % s)
                Logger.log(['cannot find suite %s' % s])
    else:
        query = session.query(Suite).filter(Suite.untouchable == False)  # noqa:E712
        if 'Archive' in Options:
            archive_names = utils.split_args(Options['Archive'])
            query = query.join(Suite.archive).filter(Archive.archive_name.in_(archive_names))
        suites = query.all()

    for s in suites:
        if s.untouchable and not Options["Force"]:
            print("Skipping %s (untouchable)" % s.suite_name)
            continue

        if not Options["Quiet"]:
            print("Processing %s" % s.suite_name)
        Logger.log(['Processing release file for Suite: %s' % (s.suite_name)])
        pool.apply_async(generate_helper, (s.suite_id, ))

    # No more work will be added to our pool, close it and then wait for all to finish
    pool.close()
    pool.join()

    retcode = pool.overall_status()

    if retcode > 0:
        # TODO: CENTRAL FUNCTION FOR THIS / IMPROVE LOGGING
        Logger.log(['Release file generation broken: %s' % (','.join([str(x[1]) for x in pool.results]))])

    Logger.close()

    sys.exit(retcode)


def generate_helper(suite_id):
    '''
    This function is called in a new subprocess.
    '''
    session = DBConn().session()
    suite = Suite.get(suite_id, session)

    # We allow the process handler to catch and deal with any exceptions
    rw = ReleaseWriter(suite)
    rw.generate_release_files()

    return (PROC_STATUS_SUCCESS, 'Release file written for %s' % suite.suite_name)

#######################################################################################


if __name__ == '__main__':
    main()