Package yum :: Module misc
[hide private]
[frames] | [no frames]

Source Code for Module yum.misc

   1  """ 
   2  Assorted utility functions for yum. 
   3  """ 
   4   
   5  import types 
   6  import os 
   7  import os.path 
   8  from cStringIO import StringIO 
   9  import base64 
  10  import struct 
  11  import re 
  12  import errno 
  13  import Errors 
  14  import constants 
  15  import pgpmsg 
  16  import tempfile 
  17  import glob 
  18  import pwd 
  19  import fnmatch 
  20  import bz2 
  21  import gzip 
  22  from rpmUtils.miscutils import stringToVersion, flagToString 
  23  from stat import * 
  24  try: 
  25      import gpgme 
  26      import gpgme.editutil 
  27  except ImportError: 
  28      gpgme = None 
  29  try: 
  30      import hashlib 
  31      _available_checksums = set(['md5', 'sha1', 'sha256', 'sha512']) 
  32      _default_checksums = ['sha256'] 
  33  except ImportError: 
  34      # Python-2.4.z ... gah! 
  35      import sha 
  36      import md5 
  37      _available_checksums = set(['md5', 'sha1']) 
  38      _default_checksums = ['sha1'] 
    # Compatibility shim for Python < 2.5: expose the old md5/sha modules
    # behind a hashlib-like interface (only the two algorithms registered in
    # _available_checksums above).
    class hashlib:

        @staticmethod
        def new(algo):
            # Return a fresh digest object for 'algo' ('md5' or 'sha1').
            if algo == 'md5':
                return md5.new()
            if algo == 'sha1':
                return sha.new()
            raise ValueError, "Bad checksum type"
48 49 from Errors import MiscError 50 # These are API things, so we can't remove them even if they aren't used here. 51 # pylint: disable-msg=W0611 52 from i18n import to_utf8, to_unicode 53 # pylint: enable-msg=W0611 54 55 _share_data_store = {} 56 _share_data_store_u = {}
def share_data(value):
    """ Take a value and use the same value from the store,
        if the value isn't in the store this one becomes the shared version. """
    # Strings must keep their concrete type: hash('a') == hash(u'a'), so a
    # single store could hand a str caller back a unicode object (or vice
    # versa).  Separate stores per type avoid that.
    if isinstance(value, unicode):
        store = _share_data_store_u
    else:
        store = _share_data_store
    # Tuples are not shared at all: hash(('a', 'b')) == hash((u'a', u'b'))
    # and deptuples mix both string types, so sharing could again swap the
    # element types out from under a caller.
    if type(value) == types.TupleType:
        return value
    return store.setdefault(value, value)
72
def unshare_data():
    """ Drop every interned value by rebinding both stores to fresh dicts. """
    global _share_data_store
    global _share_data_store_u
    _share_data_store, _share_data_store_u = {}, {}
_re_compiled_glob_match = None
def re_glob(s):
    """ Tests if a string is a shell wildcard.

        Returns the re match object (truthy) or None, like re.search. """
    # Compile once, lazily, and cache the bound .search method globally.
    global _re_compiled_glob_match
    matcher = _re_compiled_glob_match
    if matcher is None:
        matcher = re.compile('[*?]|\[.+\]').search
        _re_compiled_glob_match = matcher
    return matcher(s)
_re_compiled_filename_match = None
def re_filename(s):
    """ Tests if a string could be a filename. We still get negated character
        classes wrong (are they supported), and ranges in character classes.

        Returns the re match object (truthy) or None. """
    # Lazily build and cache the bound .match method (anchored at the start).
    global _re_compiled_filename_match
    matcher = _re_compiled_filename_match
    if matcher is None:
        matcher = re.compile('[/*?]|\[[^]]*/[^]]*\]').match
        _re_compiled_filename_match = matcher
    return matcher(s)
97
def re_primary_filename(filename):
    """ Tests if a filename string, can be matched against just primary.
        Note that this can produce false negatives (but not false
        positives). """
    # primary.xml only carries bin/ paths, /etc/ paths and the sendmail
    # special case; everything else needs the filelists metadata.
    return ('bin/' in filename or
            filename.startswith('/etc/') or
            filename == '/usr/lib/sendmail')
109
def re_primary_dirname(dirname):
    """ Tests if a dirname string, can be matched against just primary. """
    # Same rules as re_primary_filename, minus the sendmail special case.
    return 'bin/' in dirname or dirname.startswith('/etc/')
_re_compiled_full_match = None
def re_full_search_needed(s):
    """ Tests if a string needs a full nevra match, instead of just name. """
    global _re_compiled_full_match
    if _re_compiled_full_match is None:
        # A glob, or a "." or "-" separator, followed by something (the ".")
        glob_or_sep = re.compile('.*([-.*?]|\[.+\]).').match
        # Any epoch, for envra
        epoch = re.compile('[0-9]+:').match
        _re_compiled_full_match = (glob_or_sep, epoch)
    for matcher in _re_compiled_full_match:
        if matcher(s):
            return True
    return False
132
def re_remote_url(s):
    """ Tests if a string is a "remote" URL, http, https, ftp. """
    # Scheme test is case-insensitive; anything else (file:, nfs:, a bare
    # path, ...) is considered local.
    s = s.lower()
    for scheme in ("http://", "https://", "ftp://"):
        if s.startswith(scheme):
            return True
    return False
143
###########
# Title: Remove duplicates from a sequence
# Submitter: Tim Peters
# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/52560
def unique(s):
    """Return a list of the elements in s, but without duplicates.

    The result order is unspecified.  Three strategies are tried, fastest
    first:

    1. hash everything into a set (needs hashable elements),
    2. sort and sweep out adjacent duplicates (needs a total ordering),
    3. brute-force quadratic scan (needs only equality testing).
    """

    total = len(s)
    if total == 0:
        return []

    # Tier 1: hashing.  Usually works, and usually fails fast when it can't.
    try:
        hashed = set(s)
    except TypeError:
        pass
    else:
        return list(hashed)

    # Tier 2: sorting brings equal elements together so duplicates can be
    # squeezed out in one pass.  list.sort() copes well with many dups.
    try:
        ordered = list(s)
        ordered.sort()
    except TypeError:
        pass  # unorderable -- fall through to brute force
    else:
        keep = 1
        for idx in range(1, total):
            if ordered[idx] != ordered[keep - 1]:
                ordered[keep] = ordered[idx]
                keep += 1
        return ordered[:keep]

    # Tier 3: brute force is all that's left.
    result = []
    for element in s:
        if element not in result:
            result.append(element)
    return result
213
class Checksums:
    """ Generate checksum(s), on given pieces of data. Producing the
        Length and the result(s) when complete. """

    def __init__(self, checksums=None, ignore_missing=False, ignore_none=False):
        # checksums: iterable of checksum type names; 'sha' is canonicalized
        #            to 'sha1'.  None means use the module _default_checksums.
        # ignore_missing: silently skip types not in _available_checksums
        #                 instead of raising MiscError.
        # ignore_none: allow configuring no algorithms at all without raising.
        if checksums is None:
            checksums = _default_checksums
        self._sumalgos = []  # live digest objects (parallel to _sumtypes)
        self._sumtypes = []  # canonical type names, in requested order
        self._len = 0        # total bytes fed through update()

        done = set()  # guards against duplicate requested types
        for sumtype in checksums:
            if sumtype == 'sha':
                sumtype = 'sha1'
            if sumtype in done:
                continue

            if sumtype in _available_checksums:
                sumalgo = hashlib.new(sumtype)
            elif ignore_missing:
                continue
            else:
                raise MiscError, 'Error Checksumming, bad checksum type %s' % sumtype
            done.add(sumtype)
            self._sumtypes.append(sumtype)
            self._sumalgos.append(sumalgo)
        if not done and not ignore_none:
            raise MiscError, 'Error Checksumming, no valid checksum type'

    def __len__(self):
        # Number of bytes checksummed so far (not the number of algorithms).
        return self._len

    def update(self, data):
        # Feed a chunk of data to every configured digest.
        self._len += len(data)
        for sumalgo in self._sumalgos:
            sumalgo.update(data)

    def read(self, fo, size=2**16):
        # Read up to 'size' bytes from file object 'fo', checksumming as we
        # go; the data is returned so callers can stream it onwards.
        data = fo.read(size)
        self.update(data)
        return data

    def hexdigests(self):
        # Return {sumtype: hexdigest} for every configured algorithm.
        ret = {}
        for sumtype, sumdata in zip(self._sumtypes, self._sumalgos):
            ret[sumtype] = sumdata.hexdigest()
        return ret

    def hexdigest(self, checksum=None):
        # Hexdigest for one type (default: the first configured one).
        # Returns None when nothing is configured and no type was asked for;
        # raises KeyError for a type that was not configured.
        if checksum is None:
            if not self._sumtypes:
                return None
            checksum = self._sumtypes[0]
        if checksum == 'sha':
            checksum = 'sha1'
        return self.hexdigests()[checksum]

    def digests(self):
        # Return {sumtype: binary digest} for every configured algorithm.
        ret = {}
        for sumtype, sumdata in zip(self._sumtypes, self._sumalgos):
            ret[sumtype] = sumdata.digest()
        return ret

    def digest(self, checksum=None):
        # Binary digest for one type; mirrors hexdigest() semantics.
        if checksum is None:
            if not self._sumtypes:
                return None
            checksum = self._sumtypes[0]
        if checksum == 'sha':
            checksum = 'sha1'
        return self.digests()[checksum]
286
class AutoFileChecksums:
    """ Generate checksum(s), on given file/fileobject. Pretending to be a file
        object (overrides read). """

    def __init__(self, fo, checksums, ignore_missing=False, ignore_none=False):
        # fo: the wrapped file-like object; everything except read() is
        #     delegated to it via __getattr__ below.
        # checksums/ignore_missing/ignore_none: passed straight to Checksums.
        self._fo = fo
        self.checksums = Checksums(checksums, ignore_missing, ignore_none)

    def __getattr__(self, attr):
        # Delegate any attribute we don't define (name, close, ...) to the
        # underlying file object.
        return getattr(self._fo, attr)

    def read(self, size=-1):
        # Read from the wrapped file, feeding the data through the checksums
        # as a side effect.
        return self.checksums.read(self._fo, size)
301
302 303 -def checksum(sumtype, file, CHUNK=2**16, datasize=None):
304 """takes filename, hand back Checksum of it 305 sumtype = md5 or sha/sha1/sha256/sha512 (note sha == sha1) 306 filename = /path/to/file 307 CHUNK=65536 by default""" 308 309 # chunking brazenly lifted from Ryan Tomayko 310 try: 311 if type(file) not in types.StringTypes: 312 fo = file # assume it's a file-like-object 313 else: 314 fo = open(file, 'r', CHUNK) 315 316 data = Checksums([sumtype]) 317 while data.read(fo, CHUNK): 318 if datasize is not None and len(data) > datasize: 319 break 320 321 if type(file) is types.StringType: 322 fo.close() 323 del fo 324 325 # This screws up the length, but that shouldn't matter. We only care 326 # if this checksum == what we expect. 327 if datasize is not None and datasize != len(data): 328 return '!%u!%s' % (datasize, data.hexdigest(sumtype)) 329 330 return data.hexdigest(sumtype) 331 except (IOError, OSError), e: 332 raise MiscError, 'Error opening file for checksum: %s' % file
333
334 -def getFileList(path, ext, filelist):
335 """Return all files in path matching ext, store them in filelist, 336 recurse dirs return list object""" 337 338 extlen = len(ext) 339 try: 340 dir_list = os.listdir(path) 341 except OSError, e: 342 raise MiscError, ('Error accessing directory %s, %s') % (path, e) 343 344 for d in dir_list: 345 if os.path.isdir(path + '/' + d): 346 filelist = getFileList(path + '/' + d, ext, filelist) 347 else: 348 if not ext or d[-extlen:].lower() == '%s' % (ext): 349 newpath = os.path.normpath(path + '/' + d) 350 filelist.append(newpath) 351 352 return filelist
353
class GenericHolder:
    """Generic Holder class used to hold other objects of known types
       It exists purely to be able to do object.somestuff, object.someotherstuff
       or object[key] and pass object to another function that will
       understand it"""

    def __init__(self, iter=None):
        # iter: optional *name* of the attribute that __iter__ should walk.
        #       (Shadows the builtin 'iter'; kept for API compatibility.)
        self.__iter = iter

    def __iter__(self):
        # NOTE(review): when no iteration attribute was configured this
        # returns None, which makes iter(holder) raise TypeError -- callers
        # presumably only iterate holders constructed with iter=...
        if self.__iter is not None:
            return iter(self[self.__iter])

    def __getitem__(self, item):
        # Dict-style access to attributes; raising KeyError (rather than
        # AttributeError) keeps dict-like callers working.
        if hasattr(self, item):
            return getattr(self, item)
        else:
            raise KeyError, item
372
def procgpgkey(rawkey):
    '''Convert ASCII armoured GPG key to binary

    Strips the BEGIN/END armour lines, the armour headers and the trailing
    CRC line, then base64-decodes what is left.
    '''
    # TODO: CRC checking? (will RPM do this anyway?)

    # Normalise newlines
    rawkey = re.sub('\r\n?', '\n', rawkey)

    # Extract block
    block = StringIO()
    inblock = 0      # seen the BEGIN armour marker
    pastheaders = 0  # past the armour headers (first blank line inside block)
    for line in rawkey.split('\n'):
        if line.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----'):
            inblock = 1
        elif inblock and line.strip() == '':
            pastheaders = 1
        elif inblock and line.startswith('-----END PGP PUBLIC KEY BLOCK-----'):
            # Hit the end of the block, get out
            break
        elif pastheaders and line.startswith('='):
            # Hit the CRC line, don't include this and stop
            break
        elif pastheaders:
            block.write(line+'\n')

    # Decode and return
    return base64.decodestring(block.getvalue())
401
def getgpgkeyinfo(rawkey, multiple=False):
    '''Return a dict of info for the given ASCII armoured key text

    Returned dict will have the following keys: 'userid', 'keyid',
    'timestamp', 'fingerprint', 'raw_key'.  With multiple=True a list of
    such dicts (one per key found) is returned instead of just the first.

    Will raise ValueError if there was a problem decoding the key.
    '''
    # Catch all exceptions as there can be quite a variety raised by this call
    key_info_objs = []
    try:
        keys = pgpmsg.decode_multiple_keys(rawkey)
    except Exception, e:
        raise ValueError(str(e))
    if len(keys) == 0:
        raise ValueError('No key found in given key data')

    for key in keys:
        keyid_blob = key.public_key.key_id()

        info = {
            'userid': key.user_id,
            # key id as an unsigned 64-bit integer (big-endian blob)
            'keyid': struct.unpack('>Q', keyid_blob)[0],
            'timestamp': key.public_key.timestamp,
            'fingerprint' : key.public_key.fingerprint,
            'raw_key' : key.raw_key,
        }

        # Retrieve the timestamp from the matching signature packet
        # (this is what RPM appears to do)
        for userid in key.user_ids[0]:
            if not isinstance(userid, pgpmsg.signature):
                continue

            if userid.key_id() == keyid_blob:
                # Get the creation time sub-packet if available
                if hasattr(userid, 'hashed_subpaks'):
                    tspkt = \
                        userid.get_hashed_subpak(pgpmsg.SIG_SUB_TYPE_CREATE_TIME)
                    if tspkt != None:
                        info['timestamp'] = int(tspkt[1])
                        break
        key_info_objs.append(info)
    if multiple:
        return key_info_objs
    else:
        return key_info_objs[0]
448
def keyIdToRPMVer(keyid):
    '''Convert an integer representing a GPG key ID to the hex version string
    used by RPM
    '''
    # RPM keeps the low 32 bits of the key id as 8 lowercase hex digits.
    return "%08x" % (keyid & 0xffffffff)


def keyInstalled(ts, keyid, timestamp):
    '''
    Return if the GPG key described by the given keyid and timestamp are
    installed in the rpmdb.

    The keyid and timestamp should both be passed as integers.
    The ts is an rpm transaction set object

    Return values:
        - -1      key is not installed
        - 0       key with matching ID and timestamp is installed
        - 1       key with matching ID is installed but has a older timestamp
        - 2       key with matching ID is installed but has a newer timestamp

    No effort is made to handle duplicates. The first matching keyid is used to
    calculate the return result.
    '''
    # Convert key id to 'RPM' form
    keyid = keyIdToRPMVer(keyid)

    # gpg-pubkey pseudo-packages store the key id in 'version' and the
    # creation timestamp (hex) in 'release'.
    for hdr in ts.dbMatch('name', 'gpg-pubkey'):
        if hdr['version'] != keyid:
            continue
        installedts = int(hdr['release'], 16)
        if installedts == timestamp:
            return 0
        if installedts < timestamp:
            return 1
        return 2

    return -1
489
def import_key_to_pubring(rawkey, keyid, cachedir=None, gpgdir=None):
    """Import rawkey into the gpg keyring at gpgdir (or cachedir/gpgdir) and
       mark it ultimately trusted.  Returns False when the gpgme bindings
       are unavailable, True on success.  Note: mutates GNUPGHOME in the
       process environment."""
    # FIXME - cachedir can be removed from this method when we break api
    if gpgme is None:
        return False

    if not gpgdir:
        gpgdir = '%s/gpgdir' % cachedir

    if not os.path.exists(gpgdir):
        os.makedirs(gpgdir)

    key_fo = StringIO(rawkey)
    # gpgme picks up the keyring location from the environment.
    os.environ['GNUPGHOME'] = gpgdir
    # import the key
    ctx = gpgme.Context()
    # An (empty) gpg.conf must exist for gpg to accept the directory.
    fp = open(os.path.join(gpgdir, 'gpg.conf'), 'wb')
    fp.write('')
    fp.close()
    ctx.import_(key_fo)
    key_fo.close()
    # ultimately trust the key or pygpgme is definitionally stupid
    k = ctx.get_key(keyid)
    gpgme.editutil.edit_trust(ctx, k, gpgme.VALIDITY_ULTIMATE)
    return True
514
def return_keyids_from_pubring(gpgdir):
    """Return the hex keyids of every signing-capable subkey in the keyring
       at gpgdir; [] when gpgme is unavailable or gpgdir doesn't exist.
       Note: mutates GNUPGHOME in the process environment."""
    if gpgme is None or not os.path.exists(gpgdir):
        return []

    os.environ['GNUPGHOME'] = gpgdir
    ctx = gpgme.Context()
    keyids = []
    for k in ctx.keylist():
        for subkey in k.subkeys:
            if subkey.can_sign:
                keyids.append(subkey.keyid)

    return keyids
528
529 -def valid_detached_sig(sig_file, signed_file, gpghome=None):
530 """takes signature , file that was signed and an optional gpghomedir""" 531 532 if gpgme is None: 533 return False 534 535 if gpghome and os.path.exists(gpghome): 536 os.environ['GNUPGHOME'] = gpghome 537 538 sig = open(sig_file, 'r') 539 signed_text = open(signed_file, 'r') 540 plaintext = None 541 ctx = gpgme.Context() 542 543 try: 544 sigs = ctx.verify(sig, signed_text, plaintext) 545 except gpgme.GpgmeError, e: 546 return False 547 else: 548 # is there ever a case where we care about a sig beyond the first one? 549 thissig = sigs[0] 550 if not thissig: 551 return False 552 553 if thissig.validity in (gpgme.VALIDITY_FULL, gpgme.VALIDITY_MARGINAL, 554 gpgme.VALIDITY_ULTIMATE): 555 return True 556 557 return False
558
def getCacheDir(tmpdir='/var/tmp', reuse=True):
    """return a path to a valid and safe cachedir - only used when not running
       as root or when --tempcache is set"""

    uid = os.geteuid()
    try:
        username = pwd.getpwuid(uid)[0]
    except KeyError:
        return None # if it returns None then, well, it's bollocksed

    prefix = 'yum-'

    if reuse:
        # check for /var/tmp/yum-username-* -
        prefix = 'yum-%s-' % username
        for thisdir in sorted(glob.glob('%s/%s*' % (tmpdir, prefix))):
            stats = os.lstat(thisdir)
            # Only reuse a real directory that is mode 0700 (448 decimal)
            # and owned by us -- anything else could be an attacker's.
            if S_ISDIR(stats[0]) and S_IMODE(stats[0]) == 448 and stats[4] == uid:
                return thisdir

    # make the dir (tempfile.mkdtemp())
    return tempfile.mkdtemp(prefix=prefix, dir=tmpdir)
585
def sortPkgObj(pkg1, pkg2):
    """sorts a list of yum package objects by name"""
    # cmp()-style comparator: -1 / 0 / 1 ordering on the .name attribute.
    if pkg1.name == pkg2.name:
        return 0
    if pkg1.name > pkg2.name:
        return 1
    return -1
594
def newestInList(pkgs):
    """ Return the newest in the list of packages.

        All packages version-equal to the newest are returned.  NOTE: pops
        the seed element off the caller's list as a side effect. """
    newest = pkgs.pop()
    ret = [newest]
    for pkg in pkgs:
        if pkg.verGT(newest):
            newest = pkg
            ret = [pkg]
        elif pkg.verEQ(newest):
            ret.append(pkg)
    return ret
606
def version_tuple_to_string(evrTuple):
    """
    Convert a tuple representing a package version to a string.

    @param evrTuple: A 3-tuple of epoch, version, and release.

    Return the string representation of evrTuple.
    """
    (e, v, r) = evrTuple
    parts = []
    # A missing/zero epoch (0, '0' or None) is omitted entirely.
    if e not in [0, '0', None]:
        parts.append('%s:' % e)
    if v is not None:
        parts.append('%s' % v)
    if r is not None:
        parts.append('-%s' % r)
    return ''.join(parts)
625
def prco_tuple_to_string(prcoTuple):
    """returns a text string of the prco from the tuple format"""

    (name, flag, evr) = prcoTuple
    # Unversioned dep: just the name.
    if flag is None:
        return name

    symbols = {'GT':'>', 'GE':'>=', 'EQ':'=', 'LT':'<', 'LE':'<='}
    return '%s %s %s' % (name, symbols[flag], version_tuple_to_string(evr))
635
def string_to_prco_tuple(prcoString):
    """returns a prco tuple (name, flags, (e, v, r)) for a string"""

    # Accept an already-split (n, f, v) tuple as well as a raw string.
    if type(prcoString) == types.TupleType:
        (n, f, v) = prcoString
    else:
        n = prcoString
        f = v = None

    if n[0] != '/':
        # not a file dep - look at it for being versioned
        prco_split = n.split()
        if len(prco_split) == 3:
            n, f, v = prco_split

    # now we have 'n, f, v' where f and v could be None and None
    if f is not None and f not in constants.LETTERFLAGS:
        # Canonicalize the flag: '>=', '<', ... (or a raw rpm flag number)
        # into the two-letter 'GE'/'LT'/... form used throughout yum.
        if f not in constants.SYMBOLFLAGS:
            try:
                f = flagToString(int(f))
            except (ValueError,TypeError), e:
                raise Errors.MiscError, 'Invalid version flag: %s' % f
        else:
            f = constants.SYMBOLFLAGS[f]

    if type(v) in (types.StringType, types.NoneType, types.UnicodeType):
        (prco_e, prco_v, prco_r) = stringToVersion(v)
    elif type(v) in (types.TupleType, types.ListType):
        (prco_e, prco_v, prco_r) = v
    # NOTE(review): any other type for 'v' would leave prco_e/v/r unbound
    # and raise NameError below -- presumably never happens in practice.

    #now we have (n, f, (e, v, r)) for the thing specified
    return (n, f, (prco_e, prco_v, prco_r))
668
def refineSearchPattern(arg):
    """Takes a search string from the cli for Search or Provides
       and cleans it up so it doesn't make us vomit"""

    # Anything with glob/regex-ish metacharacters is treated as a glob;
    # plain strings are escaped so they match literally.
    if re.search('[*{}?+]|\[.+\]', arg) is None:
        return re.escape(arg)
    return fnmatch.translate(arg)
679
def bunzipFile(source,dest):
    """ Extract the bzipped contents of source to dest.

        Raises MiscError only on write failures; read errors end the copy
        early (deliberate best-effort for truncated/corrupt archives). """
    s_fn = bz2.BZ2File(source, 'r')
    destination = open(dest, 'w')

    while True:
        try:
            data = s_fn.read(1024000)
        except IOError:
            # Best-effort: a decompression/read error just stops the copy.
            break

        if not data: break

        try:
            destination.write(data)
        except (OSError, IOError), e:
            msg = "Error writing to file %s: %s" % (dest, str(e))
            raise Errors.MiscError, msg

    destination.close()
    s_fn.close()
701
def get_running_kernel_pkgtup(ts):
    """This takes the output of uname and figures out the pkgtup of the running
       kernel (name, arch, epoch, version, release)."""
    ver = os.uname()[2]

    # we glob for the file that MIGHT have this kernel
    # and then look up the file in our rpmdb.
    for fn in sorted(glob.glob('/boot/vmlinuz*%s*' % ver)):
        for h in ts.dbMatch('basenames', fn):
            epoch = h['epoch']
            if epoch is None:
                epoch = '0'
            return (h['name'], h['arch'], epoch, h['version'], h['release'])

    return (None, None, None, None, None)


def get_running_kernel_version_release(ts):
    """This takes the output of uname and figures out the (version, release)
       tuple for the running kernel."""
    pkgtup = get_running_kernel_pkgtup(ts)
    if pkgtup[0] is None:
        return (None, None)
    return (pkgtup[3], pkgtup[4])
727
def find_unfinished_transactions(yumlibpath='/var/lib/yum'):
    """returns a list of the timestamps from the filenames of the unfinished
       transactions remaining in the yumlibpath specified.
    """
    # NOTE: the old code also globbed 'transaction-done*' here but never
    # used the result; that dead lookup has been removed.
    timestamps = []
    tsallg = '%s/%s' % (yumlibpath, 'transaction-all*')
    for fn in glob.glob(tsallg):
        # '...disabled' marks a transaction that should not be resumed.
        if fn.endswith('disabled'):
            continue
        trans = os.path.basename(fn)
        timestamp = trans.replace('transaction-all.','')
        timestamps.append(timestamp)

    timestamps.sort()
    return timestamps
747
def find_ts_remaining(timestamp, yumlibpath='/var/lib/yum'):
    """this function takes the timestamp of the transaction to look at and
       the path to the yum lib dir (defaults to /var/lib/yum)
       returns a list of tuples(action, pkgspec) for the unfinished transaction
       elements. Returns an empty list if none.

    """

    to_complete_items = []
    tsallpath = '%s/%s.%s' % (yumlibpath, 'transaction-all', timestamp)
    tsdonepath = '%s/%s.%s' % (yumlibpath,'transaction-done', timestamp)

    if not os.path.exists(tsallpath):
        # something is wrong, here, probably need to raise _something_
        return to_complete_items

    tsdone_items = []
    if os.path.exists(tsdonepath):
        fo = open(tsdonepath, 'r')
        tsdone_items = fo.readlines()
        fo.close()

    fo = open(tsallpath, 'r')
    tsall_items = fo.readlines()
    fo.close()

    # Drop everything already marked done from the full list.
    for done_line in tsdone_items:
        # this probably shouldn't happen but it's worth catching anyway
        if done_line in tsall_items:
            tsall_items.remove(done_line)

    for line in tsall_items:
        line = line.replace('\n', '')
        if line == '':
            continue
        (action, pkgspec) = line.split()
        to_complete_items.append((action, pkgspec))

    return to_complete_items
789
def seq_max_split(seq, max_entries):
    """ Given a seq, split into a list of lists of length max_entries each.

        The last chunk holds the remainder (and an empty seq yields [[]],
        matching the historical behaviour). """
    seq = list(seq) # Trying to use a set/etc. here is bad
    ret = []
    beg = 0
    remaining = len(seq)
    while remaining > max_entries:
        ret.append(seq[beg:beg + max_entries])
        beg += max_entries
        remaining -= max_entries
    ret.append(seq[beg:])
    return ret
803
def _ugly_utf8_string_hack(item):
    """hands back a unicoded string"""
    # this is backward compat for handling non-utf8 filenames
    # and content inside packages. :(
    # content that xml can cope with but isn't really kosher

    # if we're anything obvious - do them first
    if item is None:
        return ''
    elif isinstance(item, unicode):
        return item

    # this handles any bogon formats we see
    du = False  # True when item decoded cleanly as ASCII (high bytes trusted)
    try:
        x = unicode(item, 'ascii')
        du = True
    except UnicodeError:
        # Try the usual suspects; accept an encoding only when a round-trip
        # reproduces the original bytes exactly.
        encodings = ['utf-8', 'iso-8859-1', 'iso-8859-15', 'iso-8859-2']
        for enc in encodings:
            try:
                x = unicode(item, enc)
            except UnicodeError:
                pass

            else:
                if x.encode(enc) == item:
                    if enc != 'utf-8':
                        print '\n%s encoding on %s\n' % (enc, item)
                    return x.encode('utf-8')


    # Kill bytes (or libxml will die) not in the small byte portion of:
    # http://www.w3.org/TR/REC-xml/#NT-Char
    # we allow high bytes, if it passed the utf8 check above. Eg.
    # good chars = #x9 | #xA | #xD | [#x20-...]
    newitem = ''
    bad_small_bytes = range(0, 8) + [11, 12] + range(14, 32)
    for char in item:
        if ord(char) in bad_small_bytes:
            pass # Just ignore these bytes...
        elif not du and ord(char) > 127:
            newitem = newitem + '?' # byte by byte equiv of escape
        else:
            newitem = newitem + char
    return newitem
850
def to_xml(item, attrib=False):
    # Sanitize 'item' into utf-8 text that libxml will accept, then
    # XML-escape it.  attrib=True additionally escapes double quotes so the
    # result can be embedded inside an attribute value.
    import xml.sax.saxutils
    item = _ugly_utf8_string_hack(item)
    item = to_utf8(item)
    item = item.rstrip()
    if attrib:
        item = xml.sax.saxutils.escape(item, entities={'"':"&quot;"})
    else:
        item = xml.sax.saxutils.escape(item)
    return item
861 870
def getloginuid():
    """ Get the audit-uid/login-uid, if available. None is returned if there
        was a problem. Note that no caching is done here. """
    # We might normally call audit.audit_getloginuid(), except that requires
    # importing all of the audit module. And it doesn't work anyway: BZ 518721
    try:
        fo = open("/proc/self/loginuid")
    except IOError:
        return None
    data = fo.read()
    # BUGFIX: the handle was previously never closed.
    fo.close()
    try:
        return int(data)
    except ValueError:
        return None
885 886 # ---------- i18n ---------- 887 import locale 888 import sys
def setup_locale(override_codecs=True, override_time=False):
    # Initialise the process locale from the environment, falling back to C
    # on failure.
    #   override_codecs: wrap sys.stdout so characters the terminal can't
    #                    encode are replaced instead of raising.
    #   override_time:   force LC_TIME to C so log timestamps are stable.
    # This test needs to be before locale.getpreferredencoding() as that
    # does setlocale(LC_CTYPE, "")
    try:
        locale.setlocale(locale.LC_ALL, '')
        # set time to C so that we output sane things in the logs (#433091)
        if override_time:
            locale.setlocale(locale.LC_TIME, 'C')
    except locale.Error, e:
        # default to C locale if we get a failure.
        print >> sys.stderr, 'Failed to set locale, defaulting to C'
        os.environ['LC_ALL'] = 'C'
        locale.setlocale(locale.LC_ALL, 'C')

    if override_codecs:
        import codecs
        sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
        sys.stdout.errors = 'replace'
907
def get_my_lang_code():
    """ Return the user's message locale as 'lang.encoding', or 'C' when
        no locale is configured. """
    mylang = locale.getlocale(locale.LC_MESSAGES)
    if mylang == (None, None): # odd :)
        return 'C'
    return '.'.join(mylang)
917
def return_running_pids():
    """return list of running processids, excluding this one"""
    # BUGFIX: mypid used to be an int compared against the string basename,
    # so the current process was never actually excluded.
    mypid = str(os.getpid())
    pids = []
    for fn in glob.glob('/proc/[0123456789]*'):
        pid = os.path.basename(fn)
        if pid == mypid:
            continue
        pids.append(pid)
    return pids
927
def get_open_files(pid):
    """returns files open from this pid

       Scans /proc/<pid>/maps for mapped files on fd: devices, plus any
       absolute paths found in /proc/<pid>/cmdline.  Returns [] when the
       proc entries can't be read."""
    files = []
    maps_f = '/proc/%s/maps' % pid
    try:
        maps = open(maps_f, 'r')
    except (IOError, OSError):
        return files

    for line in maps:
        if line.find('fd:') == -1:
            continue
        line = line.replace('\n', '')
        slash = line.find('/')
        filename = line[slash:]
        filename = filename.replace('(deleted)', '') #only mildly retarded
        filename = filename.strip()
        if filename not in files:
            files.append(filename)
    # BUGFIX: the maps handle was previously leaked.
    maps.close()

    cli_f = '/proc/%s/cmdline' % pid
    try:
        cli = open(cli_f, 'r')
    except (IOError, OSError):
        return files

    cmdline = cli.read()
    # BUGFIX: the cmdline handle was previously leaked.
    cli.close()
    if cmdline.find('\00') != -1:
        cmds = cmdline.split('\00')
        for i in cmds:
            if i.startswith('/'):
                files.append(i)

    return files
962
def get_uuid(savepath):
    """create, store and return a uuid. If a stored one exists, report that
       if it cannot be stored, return a random one"""
    if os.path.exists(savepath):
        # BUGFIX: read through a named handle and close it (this used to be
        # an anonymous open(savepath).read(), leaking the handle).
        fo = open(savepath, 'r')
        myid = fo.read()
        fo.close()
        return myid

    try:
        from uuid import uuid4
    except ImportError:
        # Pre-2.5 python has no uuid module: ask the kernel instead.
        fo = open('/proc/sys/kernel/random/uuid', 'r')
        myid = fo.read()
        fo.close()
    else:
        myid = str(uuid4())

    try:
        sf = open(savepath, 'w')
        sf.write(myid)
        sf.flush()
        sf.close()
    except (IOError, OSError):
        # Best-effort persistence: still hand back the generated id.
        pass

    return myid
985
def decompress(filename):
    """take a filename and decompress it into the same relative location.
       if the file is not compressed just return the file"""
    out = filename
    if filename.endswith('.gz'):
        out = filename.replace('.gz', '')
        decom = gzip.open(filename)
        # BUGFIX: gzip hands back raw (binary) data; write it through a
        # binary-mode handle instead of text-mode 'w', which can corrupt
        # the payload on platforms that translate newlines.
        fo = open(out, 'wb')
        fo.write(decom.read())
        fo.flush()
        fo.close()
        decom.close()
    elif filename.endswith('.bz') or filename.endswith('.bz2'):
        if filename.endswith('.bz'):
            out = filename.replace('.bz','')
        else:
            out = filename.replace('.bz2', '')
        bunzipFile(filename, out)

    #add magical lzma/xz trick here

    return out
1008