Package yum

Source Code for Package yum

   1  #!/usr/bin/python -tt 
   2  # This program is free software; you can redistribute it and/or modify 
   3  # it under the terms of the GNU General Public License as published by 
   4  # the Free Software Foundation; either version 2 of the License, or 
   5  # (at your option) any later version. 
   6  # 
   7  # This program is distributed in the hope that it will be useful, 
   8  # but WITHOUT ANY WARRANTY; without even the implied warranty of 
   9  # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the 
  10  # GNU Library General Public License for more details. 
  11  # 
  12  # You should have received a copy of the GNU General Public License 
  13  # along with this program; if not, write to the Free Software 
  14  # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
  15  # Copyright 2005 Duke University 
  16   
  17  """ 
  18  The Yum RPM software updater. 
  19  """ 
  20   
  21  import os 
  22  import os.path 
  23  import rpm 
  24  import re 
  25  import types 
  26  import errno 
  27  import time 
  28  import glob 
  29  import fnmatch 
  30  import logging 
  31  import logging.config 
  32  import operator 
  33  import gzip 
  34   
  35  import yum.i18n 
  36  _ = yum.i18n._ 
  37  P_ = yum.i18n.P_ 
  38   
  39  import config 
  40  from config import ParsingError, ConfigParser 
  41  import Errors 
  42  import rpmsack 
  43  import rpmUtils.updates 
  44  from rpmUtils.arch import canCoinstall, ArchStorage, isMultiLibArch 
  45  import rpmUtils.transaction 
  46  import comps 
  47  import pkgtag_db 
  48  from repos import RepoStorage 
  49  import misc 
  50  from parser import ConfigPreProcessor, varReplace 
  51  import transactioninfo 
  52  import urlgrabber 
  53  from urlgrabber.grabber import URLGrabber, URLGrabError 
  54  from urlgrabber.progress import format_number 
  55  from packageSack import packagesNewestByName, packagesNewestByNameArch, ListPackageSack 
  56  import depsolve 
  57  import plugins 
  58  import logginglevels 
  59  import yumRepo 
  60  import callbacks 
  61  import yum.history 
  62   
  63  import warnings 
  64  warnings.simplefilter("ignore", Errors.YumFutureDeprecationWarning) 
  65   
  66  from packages import parsePackages, comparePoEVR 
  67  from packages import YumAvailablePackage, YumLocalPackage, YumInstalledPackage 
  68  from packages import YumUrlPackage 
  69  from constants import * 
  70  from yum.rpmtrans import RPMTransaction,SimpleCliCallBack 
  71  from yum.i18n import to_unicode, to_str 
  72   
  73  import string 
  74   
  75  from weakref import proxy as weakref 
  76   
  77  from urlgrabber.grabber import default_grabber 
  78   
  79  __version__ = '3.2.27' 
  80  __version_info__ = tuple([ int(num) for num in __version__.split('.')]) 
  81   
  82  #  Setup a default_grabber UA here that says we are yum, done using the global 
  83  # so that other API users can easily add to it if they want. 
  84  #  Don't do it at init time, or we'll get multiple additions if you create 
  85  # multiple YumBase() objects. 
  86  default_grabber.opts.user_agent += " yum/" + __version__ 
  87   
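The comment above points at a deliberate extension point: because the User-Agent token is added to the module-level default_grabber, a tool built on the yum API can append its own token after importing yum. A minimal sketch (the "my-tool/1.0" string is made up for illustration):

    import yum
    from urlgrabber.grabber import default_grabber

    # Runs after yum's own " yum/<version>" token was appended at import time.
    default_grabber.opts.user_agent += " my-tool/1.0"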
  88  class _YumPreBaseConf:
  89      """This is the configuration interface for the YumBase configuration.
  90         So if you want to change whether plugins are on/off, debuglevel, etc.,
  91         you tweak it here, and when yb.conf does its thing ... it happens. """
  92  
  93      def __init__(self):
  94          self.fn = '/etc/yum/yum.conf'
  95          self.root = '/'
  96          self.init_plugins = True
  97          self.plugin_types = (plugins.TYPE_CORE,)
  98          self.optparser = None
  99          self.debuglevel = None
 100          self.errorlevel = None
 101          self.disabled_plugins = None
 102          self.enabled_plugins = None
 103          self.syslog_ident = None
 104          self.syslog_facility = None
 105          self.syslog_device = '/dev/log'
 106          self.arch = None
 107          self.releasever = None
 108          self.uuid = None
 109  
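A minimal usage sketch (not part of the module source) of the pattern this class exists for: set attributes on yb.preconf before the first access to yb.conf, because _getConfig() further down consumes self.preconf and then deletes it.

    import yum

    yb = yum.YumBase()
    # Must happen before yb.conf is first touched; afterwards yb.preconf is gone.
    yb.preconf.fn = '/etc/yum/yum.conf'
    yb.preconf.debuglevel = 2
    yb.preconf.init_plugins = False   # the same switch doGenericSetup() flips below
    conf = yb.conf                    # config parsing, logging and plugin setup happen here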
 110  class _YumCostExclude:
 111      """ This excludes packages that are in repos. of lower cost than the passed
 112          repo. """
 113  
 114      def __init__(self, repo, repos):
 115          self.repo = weakref(repo)
 116          self._repos = weakref(repos)
 117  
 118      def __contains__(self, pkgtup):
 119          # (n, a, e, v, r) = pkgtup
 120          for repo in self._repos.listEnabled():
 121              if repo.cost >= self.repo.cost:
 122                  break
 123              # searchNevra is a bit slower, although more generic for repos.
 124              # that don't use sqlitesack as the backend ... although they are
 125              # probably screwed anyway.
 126              #
 127              # if repo.sack.searchNevra(n, e, v, r, a):
 128              if pkgtup in repo.sack._pkgtup2pkgs:
 129                  return True
 130          return False
131
 132  class YumBase(depsolve.Depsolve):
 133      """This is a primary structure and base class. It houses the objects and
 134         methods needed to perform most things in yum. It is almost an abstract
 135         class in that you will need to add your own class above it for most
 136         real use."""
 137  
 138      def __init__(self):
 139          depsolve.Depsolve.__init__(self)
 140          self._conf = None
 141          self._tsInfo = None
 142          self._rpmdb = None
 143          self._up = None
 144          self._comps = None
 145          self._history = None
 146          self._pkgSack = None
 147          self._lockfile = None
 148          self._tags = None
 149          self.skipped_packages = []   # packages skipped by the skip-broken code
 150          self.logger = logging.getLogger("yum.YumBase")
 151          self.verbose_logger = logging.getLogger("yum.verbose.YumBase")
 152          self._repos = RepoStorage(self)
 153  
 154          # Start with plugins disabled
 155          self.disablePlugins()
 156  
 157          self.localPackages = []   # for local package handling
 158  
 159          self.mediagrabber = None
 160          self.arch = ArchStorage()
 161          self.preconf = _YumPreBaseConf()
 162  
 163          self.run_with_package_names = set()
164
 165      def __del__(self):
 166          self.close()
 167          self.closeRpmDB()
 168          self.doUnlock()
 169  
 170      def close(self):
 171          # We don't want to create the object, so we test if it's been created
 172          if self._history is not None:
 173              self.history.close()
 174  
 175          if self._repos:
 176              self._repos.close()
177
 178      def _transactionDataFactory(self):
 179          """Factory method returning TransactionData object"""
 180          return transactioninfo.TransactionData()
 181  
 182      def doGenericSetup(self, cache=0):
 183          """do a default setup for all the normal/necessary yum components,
 184             really just a shorthand for testing"""
 185  
 186          self.preconf.init_plugins = False
 187          self.conf.cache = cache
188
 189      def doConfigSetup(self, fn='/etc/yum/yum.conf', root='/', init_plugins=True,
 190                        plugin_types=(plugins.TYPE_CORE,), optparser=None, debuglevel=None,
 191                        errorlevel=None):
 192          warnings.warn(_('doConfigSetup() will go away in a future version of Yum.\n'),
 193                        Errors.YumFutureDeprecationWarning, stacklevel=2)
 194  
 195          if hasattr(self, 'preconf'):
 196              self.preconf.fn = fn
 197              self.preconf.root = root
 198              self.preconf.init_plugins = init_plugins
 199              self.preconf.plugin_types = plugin_types
 200              self.preconf.optparser = optparser
 201              self.preconf.debuglevel = debuglevel
 202              self.preconf.errorlevel = errorlevel
 203  
 204          return self.conf
205
206 - def _getConfig(self, **kwargs):
207 ''' 208 Parse and load Yum's configuration files and call hooks initialise 209 plugins and logging. Uses self.preconf for pre-configuration, 210 configuration. ''' 211 212 # ' xemacs syntax hack 213 214 if kwargs: 215 warnings.warn('Use .preconf instead of passing args to _getConfig') 216 217 if self._conf: 218 return self._conf 219 conf_st = time.time() 220 221 if kwargs: 222 for arg in ('fn', 'root', 'init_plugins', 'plugin_types', 223 'optparser', 'debuglevel', 'errorlevel', 224 'disabled_plugins', 'enabled_plugins'): 225 if arg in kwargs: 226 setattr(self.preconf, arg, kwargs[arg]) 227 228 fn = self.preconf.fn 229 root = self.preconf.root 230 init_plugins = self.preconf.init_plugins 231 plugin_types = self.preconf.plugin_types 232 optparser = self.preconf.optparser 233 debuglevel = self.preconf.debuglevel 234 errorlevel = self.preconf.errorlevel 235 disabled_plugins = self.preconf.disabled_plugins 236 enabled_plugins = self.preconf.enabled_plugins 237 syslog_ident = self.preconf.syslog_ident 238 syslog_facility = self.preconf.syslog_facility 239 syslog_device = self.preconf.syslog_device 240 releasever = self.preconf.releasever 241 arch = self.preconf.arch 242 uuid = self.preconf.uuid 243 244 if arch: # if preconf is setting an arch we need to pass that up 245 self.arch.setup_arch(arch) 246 else: 247 arch = self.arch.canonarch 248 249 # TODO: Remove this block when we no longer support configs outside 250 # of /etc/yum/ 251 if fn == '/etc/yum/yum.conf' and not os.path.exists(fn): 252 # Try the old default 253 fn = '/etc/yum.conf' 254 255 startupconf = config.readStartupConfig(fn, root) 256 startupconf.arch = arch 257 startupconf.basearch = self.arch.basearch 258 if uuid: 259 startupconf.uuid = uuid 260 261 if startupconf.gaftonmode: 262 global _ 263 _ = yum.i18n.dummy_wrapper 264 265 if debuglevel != None: 266 startupconf.debuglevel = debuglevel 267 if errorlevel != None: 268 startupconf.errorlevel = errorlevel 269 if syslog_ident != None: 270 startupconf.syslog_ident = syslog_ident 271 if syslog_facility != None: 272 startupconf.syslog_facility = syslog_facility 273 if releasever != None: 274 startupconf.releasever = releasever 275 276 self.doLoggingSetup(startupconf.debuglevel, startupconf.errorlevel, 277 startupconf.syslog_ident, 278 startupconf.syslog_facility, syslog_device) 279 280 if init_plugins and startupconf.plugins: 281 self.doPluginSetup(optparser, plugin_types, startupconf.pluginpath, 282 startupconf.pluginconfpath,disabled_plugins,enabled_plugins) 283 284 self._conf = config.readMainConfig(startupconf) 285 286 # We don't want people accessing/altering preconf after it becomes 287 # worthless. So we delete it, and thus. it'll raise AttributeError 288 del self.preconf 289 290 # Packages used to run yum... 291 for pkgname in self.conf.history_record_packages: 292 self.run_with_package_names.add(pkgname) 293 294 # run the postconfig plugin hook 295 self.plugins.run('postconfig') 296 # Note that Pungi has historically replaced _getConfig(), and it sets 297 # up self.conf.yumvar but not self.yumvar ... and AFAIK nothing needs 298 # to use YumBase.yumvar, so it's probably easier to just semi-deprecate 299 # this (core now only uses YumBase.conf.yumvar). 300 self.yumvar = self.conf.yumvar 301 302 self.getReposFromConfig() 303 304 # who are we: 305 self.conf.uid = os.geteuid() 306 307 308 self.doFileLogSetup(self.conf.uid, self.conf.logfile) 309 self.verbose_logger.debug('Config time: %0.3f' % (time.time() - conf_st)) 310 self.plugins.run('init') 311 return self._conf
312 313
 314      def doLoggingSetup(self, debuglevel, errorlevel,
 315                         syslog_ident=None, syslog_facility=None,
 316                         syslog_device='/dev/log'):
 317          '''
 318          Perform logging related setup.
 319  
 320          @param debuglevel: Debug logging level to use.
 321          @param errorlevel: Error logging level to use.
 322          '''
 323          logginglevels.doLoggingSetup(debuglevel, errorlevel,
 324                                       syslog_ident, syslog_facility,
 325                                       syslog_device)
326
327 - def doFileLogSetup(self, uid, logfile):
329
330 - def getReposFromConfigFile(self, repofn, repo_age=None, validate=None):
331 """read in repositories from a config .repo file""" 332 333 if repo_age is None: 334 repo_age = os.stat(repofn)[8] 335 336 confpp_obj = ConfigPreProcessor(repofn, vars=self.conf.yumvar) 337 parser = ConfigParser() 338 try: 339 parser.readfp(confpp_obj) 340 except ParsingError, e: 341 msg = str(e) 342 raise Errors.ConfigError, msg 343 344 # Check sections in the .repo file that was just slurped up 345 for section in parser.sections(): 346 347 if section in ['main', 'installed']: 348 continue 349 350 # Check the repo.id against the valid chars 351 bad = None 352 for byte in section: 353 if byte in string.ascii_letters: 354 continue 355 if byte in string.digits: 356 continue 357 if byte in "-_.:": 358 continue 359 360 bad = byte 361 break 362 363 if bad: 364 self.logger.warning("Bad id for repo: %s, byte = %s %d" % 365 (section, bad, section.find(bad))) 366 continue 367 368 try: 369 thisrepo = self.readRepoConfig(parser, section) 370 except (Errors.RepoError, Errors.ConfigError), e: 371 self.logger.warning(e) 372 continue 373 else: 374 thisrepo.repo_config_age = repo_age 375 thisrepo.repofile = repofn 376 377 if validate and not validate(thisrepo): 378 continue 379 380 # Got our list of repo objects, add them to the repos 381 # collection 382 try: 383 self._repos.add(thisrepo) 384 except Errors.RepoError, e: 385 self.logger.warning(e)
386
387 - def getReposFromConfig(self):
388 """read in repositories from config main and .repo files""" 389 390 # Read .repo files from directories specified by the reposdir option 391 # (typically /etc/yum/repos.d) 392 repo_config_age = self.conf.config_file_age 393 394 # Get the repos from the main yum.conf file 395 self.getReposFromConfigFile(self.conf.config_file_path, repo_config_age) 396 397 for reposdir in self.conf.reposdir: 398 # this check makes sure that our dirs exist properly. 399 # if they aren't in the installroot then don't prepend the installroot path 400 # if we don't do this then anaconda likes to not work. 401 if os.path.exists(self.conf.installroot+'/'+reposdir): 402 reposdir = self.conf.installroot + '/' + reposdir 403 404 if os.path.isdir(reposdir): 405 for repofn in sorted(glob.glob('%s/*.repo' % reposdir)): 406 thisrepo_age = os.stat(repofn)[8] 407 if thisrepo_age < repo_config_age: 408 thisrepo_age = repo_config_age 409 self.getReposFromConfigFile(repofn, repo_age=thisrepo_age)
410
411 - def readRepoConfig(self, parser, section):
412 '''Parse an INI file section for a repository. 413 414 @param parser: ConfParser or similar to read INI file values from. 415 @param section: INI file section to read. 416 @return: YumRepository instance. 417 ''' 418 repo = yumRepo.YumRepository(section) 419 repo.populate(parser, section, self.conf) 420 421 # Ensure that the repo name is set 422 if not repo.name: 423 repo.name = section 424 self.logger.error(_('Repository %r is missing name in configuration, ' 425 'using id') % section) 426 repo.name = to_unicode(repo.name) 427 428 # Set attributes not from the config file 429 repo.basecachedir = self.conf.cachedir 430 repo.yumvar.update(self.conf.yumvar) 431 repo.cfg = parser 432 433 return repo
434
435 - def disablePlugins(self):
436 '''Disable yum plugins 437 ''' 438 self.plugins = plugins.DummyYumPlugins()
439
440 - def doPluginSetup(self, optparser=None, plugin_types=None, searchpath=None, 441 confpath=None,disabled_plugins=None,enabled_plugins=None):
442 '''Initialise and enable yum plugins. 443 444 Note: _getConfig() will initialise plugins if instructed to. Only 445 call this method directly if not calling _getConfig() or calling 446 doConfigSetup(init_plugins=False). 447 448 @param optparser: The OptionParser instance for this run (optional) 449 @param plugin_types: A sequence specifying the types of plugins to load. 450 This should be a sequence containing one or more of the 451 yum.plugins.TYPE_... constants. If None (the default), all plugins 452 will be loaded. 453 @param searchpath: A list of directories to look in for plugins. A 454 default will be used if no value is specified. 455 @param confpath: A list of directories to look in for plugin 456 configuration files. A default will be used if no value is 457 specified. 458 @param disabled_plugins: Plugins to be disabled 459 @param enabled_plugins: Plugins to be enabled 460 ''' 461 if isinstance(self.plugins, plugins.YumPlugins): 462 raise RuntimeError(_("plugins already initialised")) 463 464 self.plugins = plugins.YumPlugins(self, searchpath, optparser, 465 plugin_types, confpath, disabled_plugins, enabled_plugins)
466 467
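As the docstring notes, doPluginSetup() is only for callers that skipped plugin initialisation during config setup. A hedged sketch of that path (core plugins only; searchpath and confpath left at their defaults):

    yb = yum.YumBase()
    yb.preconf.init_plugins = False                   # keep _getConfig() from loading plugins
    yb.conf                                           # load the configuration first
    yb.doPluginSetup(plugin_types=(yum.plugins.TYPE_CORE,))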
468 - def doRpmDBSetup(self):
469 warnings.warn(_('doRpmDBSetup() will go away in a future version of Yum.\n'), 470 Errors.YumFutureDeprecationWarning, stacklevel=2) 471 472 return self._getRpmDB()
473
474 - def _getRpmDB(self):
475 """sets up a holder object for important information from the rpmdb""" 476 477 if self._rpmdb is None: 478 rpmdb_st = time.time() 479 self.verbose_logger.log(logginglevels.DEBUG_4, 480 _('Reading Local RPMDB')) 481 self._rpmdb = rpmsack.RPMDBPackageSack(root=self.conf.installroot, 482 releasever=self.conf.yumvar['releasever'], 483 persistdir=self.conf.persistdir, 484 cachedir=self.conf.cachedir) 485 self.verbose_logger.debug('rpmdb time: %0.3f' % (time.time() - rpmdb_st)) 486 return self._rpmdb
487
488 - def closeRpmDB(self):
489 """closes down the instances of the rpmdb we have wangling around""" 490 if self._rpmdb is not None: 491 self._rpmdb.ts = None 492 self._rpmdb.dropCachedData() 493 self._rpmdb = None 494 self._ts = None 495 self._tsInfo = None 496 self._up = None 497 self.comps = None
498
499 - def _deleteTs(self):
500 del self._ts 501 self._ts = None
502
503 - def doRepoSetup(self, thisrepo=None):
504 warnings.warn(_('doRepoSetup() will go away in a future version of Yum.\n'), 505 Errors.YumFutureDeprecationWarning, stacklevel=2) 506 507 return self._getRepos(thisrepo, True)
508
509 - def _getRepos(self, thisrepo=None, doSetup = False):
510 """ For each enabled repository set up the basics of the repository. """ 511 self.conf # touch the config class first 512 513 if doSetup: 514 repo_st = time.time() 515 self._repos.doSetup(thisrepo) 516 self.verbose_logger.debug('repo time: %0.3f' % (time.time() - repo_st)) 517 return self._repos
518
519 - def _delRepos(self):
520 del self._repos 521 self._repos = RepoStorage(self)
522
523 - def doSackSetup(self, archlist=None, thisrepo=None):
524 warnings.warn(_('doSackSetup() will go away in a future version of Yum.\n'), 525 Errors.YumFutureDeprecationWarning, stacklevel=2) 526 527 return self._getSacks(archlist=archlist, thisrepo=thisrepo)
528
529 - def _getSacks(self, archlist=None, thisrepo=None):
530 """populates the package sacks for information from our repositories, 531 takes optional archlist for archs to include""" 532 533 # FIXME: Fist of death ... normally we'd do either: 534 # 535 # 1. use self._pkgSack is not None, and only init. once. 536 # 2. auto. correctly re-init each time a repo is added/removed 537 # 538 # ...we should probably just smeg it and do #2, but it's hard and will 539 # probably break something (but it'll "fix" excludes). 540 # #1 can't be done atm. because we did self._pkgSack and external 541 # tools now rely on being able to create an empty sack and then have it 542 # auto. re-init when they add some stuff. So we add a bit more "clever" 543 # and don't setup the pkgSack to not be None when it's empty. This means 544 # we skip excludes/includes/etc. ... but there's no packages, so 545 # hopefully that's ok. 546 if self._pkgSack is not None and thisrepo is None: 547 return self._pkgSack 548 549 if thisrepo is None: 550 repos = 'enabled' 551 else: 552 repos = self.repos.findRepos(thisrepo) 553 554 self.verbose_logger.debug(_('Setting up Package Sacks')) 555 sack_st = time.time() 556 if not archlist: 557 archlist = self.arch.archlist 558 559 archdict = {} 560 for arch in archlist: 561 archdict[arch] = 1 562 563 self.repos.getPackageSack().setCompatArchs(archdict) 564 self.repos.populateSack(which=repos) 565 if not self.repos.getPackageSack(): 566 return self.repos.getPackageSack() # ha ha, see above 567 self._pkgSack = self.repos.getPackageSack() 568 569 self.excludePackages() 570 self._pkgSack.excludeArchs(archlist) 571 572 #FIXME - this could be faster, too. 573 if repos == 'enabled': 574 repos = self.repos.listEnabled() 575 for repo in repos: 576 self.includePackages(repo) 577 self.excludePackages(repo) 578 self.plugins.run('exclude') 579 self._pkgSack.buildIndexes() 580 581 # now go through and kill pkgs based on pkg.repo.cost() 582 self.costExcludePackages() 583 self.verbose_logger.debug('pkgsack time: %0.3f' % (time.time() - sack_st)) 584 return self._pkgSack
585 586
587 - def _delSacks(self):
588 """reset the package sacks back to zero - making sure to nuke the ones 589 in the repo objects, too - where it matters""" 590 591 # nuke the top layer 592 593 self._pkgSack = None 594 595 for repo in self.repos.repos.values(): 596 if hasattr(repo, '_resetSack'): 597 repo._resetSack() 598 else: 599 warnings.warn(_('repo object for repo %s lacks a _resetSack method\n') + 600 _('therefore this repo cannot be reset.\n'), 601 Errors.YumFutureDeprecationWarning, stacklevel=2)
602 603
604 - def doUpdateSetup(self):
605 warnings.warn(_('doUpdateSetup() will go away in a future version of Yum.\n'), 606 Errors.YumFutureDeprecationWarning, stacklevel=2) 607 608 return self._getUpdates()
609
610 - def _getUpdates(self):
611 """setups up the update object in the base class and fills out the 612 updates, obsoletes and others lists""" 613 614 if self._up: 615 return self._up 616 617 self.verbose_logger.debug(_('Building updates object')) 618 619 up_st = time.time() 620 621 self._up = rpmUtils.updates.Updates(self.rpmdb.simplePkgList(), self.pkgSack.simplePkgList()) 622 if self.conf.debuglevel >= 7: 623 self._up.debug = 1 624 625 if self.conf.obsoletes: 626 obs_init = time.time() 627 # Note: newest=True here is semi-required for repos. with multiple 628 # versions. The problem is that if pkgA-2 _accidentally_ obsoletes 629 # pkgB-1, and we keep all versions, we want to release a pkgA-3 630 # that doesn't do the obsoletes ... and thus. not obsolete pkgB-1. 631 self._up.rawobsoletes = self.pkgSack.returnObsoletes(newest=True) 632 self.verbose_logger.debug('up:Obs Init time: %0.3f' % (time.time() - obs_init)) 633 634 self._up.myarch = self.arch.canonarch 635 self._up._is_multilib = self.arch.multilib 636 self._up._archlist = self.arch.archlist 637 self._up._multilib_compat_arches = self.arch.compatarches 638 self._up.exactarch = self.conf.exactarch 639 self._up.exactarchlist = self.conf.exactarchlist 640 up_pr_st = time.time() 641 self._up.doUpdates() 642 self.verbose_logger.debug('up:simple updates time: %0.3f' % (time.time() - up_pr_st)) 643 644 if self.conf.obsoletes: 645 obs_st = time.time() 646 self._up.doObsoletes() 647 self.verbose_logger.debug('up:obs time: %0.3f' % (time.time() - obs_st)) 648 649 cond_up_st = time.time() 650 self._up.condenseUpdates() 651 self.verbose_logger.debug('up:condense time: %0.3f' % (time.time() - cond_up_st)) 652 self.verbose_logger.debug('updates time: %0.3f' % (time.time() - up_st)) 653 return self._up
654
655 - def doGroupSetup(self):
656 warnings.warn(_('doGroupSetup() will go away in a future version of Yum.\n'), 657 Errors.YumFutureDeprecationWarning, stacklevel=2) 658 659 self.comps = None 660 return self._getGroups()
661
662 - def _setGroups(self, val):
663 if val is None: 664 # if we unset the comps object, we need to undo which repos have 665 # been added to the group file as well 666 if self._repos: 667 for repo in self._repos.listGroupsEnabled(): 668 repo.groups_added = False 669 self._comps = val
670
671 - def _getGroups(self):
672 """create the groups object that will store the comps metadata 673 finds the repos with groups, gets their comps data and merge it 674 into the group object""" 675 676 if self._comps: 677 return self._comps 678 679 group_st = time.time() 680 self.verbose_logger.log(logginglevels.DEBUG_4, 681 _('Getting group metadata')) 682 reposWithGroups = [] 683 self.repos.doSetup() 684 for repo in self.repos.listGroupsEnabled(): 685 if repo.groups_added: # already added the groups from this repo 686 reposWithGroups.append(repo) 687 continue 688 689 if not repo.ready(): 690 raise Errors.RepoError, "Repository '%s' not yet setup" % repo 691 try: 692 groupremote = repo.getGroupLocation() 693 except Errors.RepoMDError, e: 694 pass 695 else: 696 reposWithGroups.append(repo) 697 698 # now we know which repos actually have groups files. 699 overwrite = self.conf.overwrite_groups 700 self._comps = comps.Comps(overwrite_groups = overwrite) 701 702 for repo in reposWithGroups: 703 if repo.groups_added: # already added the groups from this repo 704 continue 705 706 self.verbose_logger.log(logginglevels.DEBUG_4, 707 _('Adding group file from repository: %s'), repo) 708 groupfile = repo.getGroups() 709 # open it up as a file object so iterparse can cope with our gz file 710 if groupfile is not None and groupfile.endswith('.gz'): 711 groupfile = gzip.open(groupfile) 712 713 try: 714 self._comps.add(groupfile) 715 except (Errors.GroupsError,Errors.CompsException), e: 716 msg = _('Failed to add groups file for repository: %s - %s') % (repo, str(e)) 717 self.logger.critical(msg) 718 else: 719 repo.groups_added = True 720 721 if self._comps.compscount == 0: 722 raise Errors.GroupsError, _('No Groups Available in any repository') 723 724 self._comps.compile(self.rpmdb.simplePkgList()) 725 self.verbose_logger.debug('group time: %0.3f' % (time.time() - group_st)) 726 return self._comps
727
728 - def _getTags(self):
729 """ create the tags object used to search/report from the pkgtags 730 metadata""" 731 732 tag_st = time.time() 733 self.verbose_logger.log(logginglevels.DEBUG_4, 734 _('Getting pkgtags metadata')) 735 736 if self._tags is None: 737 self._tags = yum.pkgtag_db.PackageTags() 738 739 for repo in self.repos.listEnabled(): 740 if 'pkgtags' not in repo.repoXML.fileTypes(): 741 continue 742 743 self.verbose_logger.log(logginglevels.DEBUG_4, 744 _('Adding tags from repository: %s'), repo) 745 746 # fetch the sqlite tagdb 747 try: 748 tag_md = repo.retrieveMD('pkgtags') 749 tag_sqlite = yum.misc.decompress(tag_md) 750 # feed it into _tags.add() 751 self._tags.add(repo.id, tag_sqlite) 752 except (Errors.RepoError, Errors.PkgTagsError), e: 753 msg = _('Failed to add Pkg Tags for repository: %s - %s') % (repo, str(e)) 754 self.logger.critical(msg) 755 756 757 self.verbose_logger.debug('tags time: %0.3f' % (time.time() - tag_st)) 758 return self._tags
759
760 - def _getHistory(self):
761 """auto create the history object that to access/append the transaction 762 history information. """ 763 if self._history is None: 764 pdb_path = self.conf.persistdir + "/history" 765 self._history = yum.history.YumHistory(root=self.conf.installroot, 766 db_path=pdb_path) 767 return self._history
768 769 # properties so they auto-create themselves with defaults 770 repos = property(fget=lambda self: self._getRepos(), 771 fset=lambda self, value: setattr(self, "_repos", value), 772 fdel=lambda self: self._delRepos(), 773 doc="Repo Storage object - object of yum repositories") 774 pkgSack = property(fget=lambda self: self._getSacks(), 775 fset=lambda self, value: setattr(self, "_pkgSack", value), 776 fdel=lambda self: self._delSacks(), 777 doc="Package sack object - object of yum package objects") 778 conf = property(fget=lambda self: self._getConfig(), 779 fset=lambda self, value: setattr(self, "_conf", value), 780 fdel=lambda self: setattr(self, "_conf", None), 781 doc="Yum Config Object") 782 rpmdb = property(fget=lambda self: self._getRpmDB(), 783 fset=lambda self, value: setattr(self, "_rpmdb", value), 784 fdel=lambda self: setattr(self, "_rpmdb", None), 785 doc="RpmSack object") 786 tsInfo = property(fget=lambda self: self._getTsInfo(), 787 fset=lambda self,value: self._setTsInfo(value), 788 fdel=lambda self: self._delTsInfo(), 789 doc="Transaction Set information object") 790 ts = property(fget=lambda self: self._getActionTs(), 791 fdel=lambda self: self._deleteTs(), 792 doc="TransactionSet object") 793 up = property(fget=lambda self: self._getUpdates(), 794 fset=lambda self, value: setattr(self, "_up", value), 795 fdel=lambda self: setattr(self, "_up", None), 796 doc="Updates Object") 797 comps = property(fget=lambda self: self._getGroups(), 798 fset=lambda self, value: self._setGroups(value), 799 fdel=lambda self: setattr(self, "_comps", None), 800 doc="Yum Component/groups object") 801 history = property(fget=lambda self: self._getHistory(), 802 fset=lambda self, value: setattr(self, "_history",value), 803 fdel=lambda self: setattr(self, "_history", None), 804 doc="Yum History Object") 805 806 pkgtags = property(fget=lambda self: self._getTags(), 807 fset=lambda self, value: setattr(self, "_tags",value), 808 fdel=lambda self: setattr(self, "_tags", None), 809 doc="Yum Package Tags Object") 810 811
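The property block above is what makes these attributes lazy: reading yb.conf, yb.rpmdb, yb.pkgSack, etc. calls the matching _get* method on first use, and deleting an attribute tears it back down through the matching _del* method or setter. A small illustrative sketch:

    yb = yum.YumBase()
    yb.conf                                    # parses configuration via _getConfig()
    yb.rpmdb                                   # opens the local rpmdb via _getRpmDB()
    print len(yb.pkgSack.simplePkgList())      # sets up repos and sacks via _getSacks()
    del yb.pkgSack                             # resets the sacks via _delSacks()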
812 - def doSackFilelistPopulate(self):
813 """convenience function to populate the repos with the filelist metadata 814 it also is simply to only emit a log if anything actually gets populated""" 815 816 necessary = False 817 818 # I can't think of a nice way of doing this, we have to have the sack here 819 # first or the below does nothing so... 820 if self.pkgSack: 821 for repo in self.repos.listEnabled(): 822 if repo in repo.sack.added: 823 if 'filelists' in repo.sack.added[repo]: 824 continue 825 else: 826 necessary = True 827 else: 828 necessary = True 829 830 if necessary: 831 msg = _('Importing additional filelist information') 832 self.verbose_logger.log(logginglevels.INFO_2, msg) 833 self.repos.populateSack(mdtype='filelists')
834
835 - def yumUtilsMsg(self, func, prog):
836 """ Output a message that the tool requires the yum-utils package, 837 if not installed. """ 838 if self.rpmdb.contains(name="yum-utils"): 839 return 840 841 hibeg, hiend = "", "" 842 if hasattr(self, 'term'): 843 hibeg, hiend = self.term.MODE['bold'], self.term.MODE['normal'] 844 845 func(_("The program %s%s%s is found in the yum-utils package.") % 846 (hibeg, prog, hiend))
847
848 - def buildTransaction(self, unfinished_transactions_check=True):
849 """go through the packages in the transaction set, find them in the 850 packageSack or rpmdb, and pack up the ts accordingly""" 851 if (unfinished_transactions_check and 852 misc.find_unfinished_transactions(yumlibpath=self.conf.persistdir)): 853 msg = _('There are unfinished transactions remaining. You might ' \ 854 'consider running yum-complete-transaction first to finish them.' ) 855 self.logger.critical(msg) 856 self.yumUtilsMsg(self.logger.critical, "yum-complete-transaction") 857 time.sleep(3) 858 859 self.plugins.run('preresolve') 860 ds_st = time.time() 861 862 (rescode, restring) = self.resolveDeps() 863 self._limit_installonly_pkgs() 864 865 # We _must_ get rid of all the used tses before we go on, so that C-c 866 # works for downloads / mirror failover etc. 867 self.rpmdb.ts = None 868 869 # do the skip broken magic, if enabled and problems exist 870 (rescode, restring) = self._doSkipBroken(rescode, restring) 871 872 self.plugins.run('postresolve', rescode=rescode, restring=restring) 873 874 if self.tsInfo.changed: 875 (rescode, restring) = self.resolveDeps(rescode == 1) 876 # If transaction was changed by postresolve plugins then we should run skipbroken again 877 (rescode, restring) = self._doSkipBroken(rescode, restring, clear_skipped=False ) 878 879 if self.tsInfo.pkgSack is not None: # rm Transactions don't have pkgSack 880 self.tsInfo.pkgSack.dropCachedData() 881 self.rpmdb.dropCachedData() 882 883 self.verbose_logger.debug('Depsolve time: %0.3f' % (time.time() - ds_st)) 884 return rescode, restring
885
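A sketch of the resolve-then-run flow around buildTransaction(); the yb.install() call and the RPMTransaction construction are assumptions about API defined outside this section, so treat this as illustrative only:

    yb = yum.YumBase()
    yb.install(name='zsh')                         # assumed helper, defined elsewhere in YumBase
    (rescode, restring) = yb.buildTransaction()
    if rescode == 1:                               # depsolving failed; restring holds the messages
        for msg in restring:
            print msg
    elif rescode == 2:                             # a transaction was built and should be run
        pkgs = [txmbr.po for txmbr in yb.tsInfo
                if txmbr.output_state in yum.constants.TS_INSTALL_STATES]
        yb.downloadPkgs(pkgs)
        cb = yum.rpmtrans.RPMTransaction(yb)       # assumed: default display callback is enough
        yb.runTransaction(cb)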
886 - def _doSkipBroken(self,rescode, restring, clear_skipped=True):
887 ''' do skip broken if it is enabled ''' 888 # if depsolve failed and skipbroken is enabled 889 # The remove the broken packages from the transactions and 890 # Try another depsolve 891 if self.conf.skip_broken and rescode==1: 892 if clear_skipped: 893 self.skipped_packages = [] # reset the public list of skipped packages. 894 sb_st = time.time() 895 rescode, restring = self._skipPackagesWithProblems(rescode, restring) 896 self._printTransaction() 897 self.verbose_logger.debug('Skip-Broken time: %0.3f' % (time.time() - sb_st)) 898 return (rescode, restring)
899 900
901 - def _skipPackagesWithProblems(self, rescode, restring):
902 ''' Remove the packages with depsolve errors and depsolve again ''' 903 904 def _remove(po, depTree, toRemove): 905 if not po: 906 return 907 self._getPackagesToRemove(po, depTree, toRemove) 908 # Only remove non installed packages from pkgSack 909 _remove_from_sack(po)
910 911 def _remove_from_sack(po): 912 # get all compatible arch packages from pkgSack 913 # we need to remove them too so i386 packages are not 914 # dragged in when a x86_64 is skipped. 915 pkgs = self._getPackagesToRemoveAllArch(po) 916 for pkg in pkgs: 917 if not po.repoid == 'installed' and pkg not in removed_from_sack: 918 self.verbose_logger.debug('SKIPBROKEN: removing %s from pkgSack & updates' % str(po)) 919 self.pkgSack.delPackage(pkg) 920 self.up.delPackage(pkg.pkgtup) 921 removed_from_sack.add(pkg)
922 923 # Keep removing packages & Depsolve until all errors is gone 924 # or the transaction is empty 925 count = 0 926 skipped_po = set() 927 removed_from_sack = set() 928 orig_restring = restring # Keep the old error messages 929 looping = 0 930 while (len(self.po_with_problems) > 0 and rescode == 1): 931 count += 1 932 # Remove all the rpmdb cache data, this is somewhat heavy handed 933 # but easier than removing/altering specific bits of the cache ... 934 # and skip-broken shouldn't care too much about speed. 935 self.rpmdb.transactionReset() 936 self.installedFileRequires = None # Kind of hacky 937 self.verbose_logger.debug(_("Skip-broken round %i"), count) 938 self._printTransaction() 939 depTree = self._buildDepTree() 940 startTs = set(self.tsInfo) 941 toRemove = set() 942 for po,wpo,err in self.po_with_problems: 943 # check if the problem is caused by a package in the transaction 944 if not self.tsInfo.exists(po.pkgtup): 945 _remove(wpo, depTree, toRemove) 946 else: 947 _remove(po, depTree, toRemove) 948 for po in toRemove: 949 skipped = self._skipFromTransaction(po) 950 for skip in skipped: 951 skipped_po.add(skip) 952 # make sure we get the compat arch packages skip from pkgSack and up too. 953 if skip not in removed_from_sack and skip.repoid == 'installed': 954 _remove_from_sack(skip) 955 # Nothing was removed, so we still got a problem 956 # the first time we get here we reset the resolved members of 957 # tsInfo and takes a new run all members in the current transaction 958 if not toRemove: 959 looping += 1 960 if looping > 2: 961 break # Bail out 962 else: 963 self.verbose_logger.debug('SKIPBROKEN: resetting already resolved packages (no packages to skip)' ) 964 self.tsInfo.resetResolved(hard=True) 965 rescode, restring = self.resolveDeps(True) 966 endTs = set(self.tsInfo) 967 # Check if tsInfo has changes since we started to skip packages 968 # if there is no changes then we got a loop. 969 # the first time we get here we reset the resolved members of 970 # tsInfo and takes a new run all members in the current transaction 971 if startTs-endTs == set(): 972 looping += 1 973 if looping > 2: 974 break # Bail out 975 else: 976 self.verbose_logger.debug('SKIPBROKEN: resetting already resolved packages (transaction not changed)' ) 977 self.tsInfo.resetResolved(hard=True) 978 979 # if we are all clear, then we have to check that the whole current transaction 980 # can complete the depsolve without error, because the packages skipped 981 # can have broken something that passed the tests earlier. 982 # FIXME: We need do this in a better way. 983 if rescode != 1: 984 self.verbose_logger.debug('SKIPBROKEN: sanity check the current transaction' ) 985 self.tsInfo.resetResolved(hard=True) 986 self._checkMissingObsoleted() # This is totally insane, but needed :( 987 self._checkUpdatedLeftovers() # Cleanup updated leftovers 988 rescode, restring = self.resolveDeps() 989 if rescode != 1: 990 self.verbose_logger.debug(_("Skip-broken took %i rounds "), count) 991 self.verbose_logger.info(_('\nPackages skipped because of dependency problems:')) 992 skipped_list = [p for p in skipped_po] 993 skipped_list.sort() 994 for po in skipped_list: 995 msg = _(" %s from %s") % (str(po),po.repo.id) 996 self.verbose_logger.info(msg) 997 self.skipped_packages.extend(skipped_list) # make the skipped packages public 998 else: 999 # If we cant solve the problems the show the original error messages. 
1000 self.verbose_logger.info("Skip-broken could not solve problems") 1001 return 1, orig_restring 1002 return rescode, restring 1003
1004 - def _checkMissingObsoleted(self):
1005 """ 1006 If multiple packages is obsoleting the same package 1007 then the TS_OBSOLETED can get removed from the transaction 1008 so we must make sure that they, exist and else create them 1009 """ 1010 for txmbr in self.tsInfo.getMembersWithState(None, [TS_OBSOLETING,TS_OBSOLETED]): 1011 for pkg in txmbr.obsoletes: 1012 if not self.tsInfo.exists(pkg.pkgtup): 1013 obs = self.tsInfo.addObsoleted(pkg,txmbr.po) 1014 self.verbose_logger.debug('SKIPBROKEN: Added missing obsoleted %s (%s)' % (pkg,txmbr.po) ) 1015 for pkg in txmbr.obsoleted_by: 1016 # check if the obsoleting txmbr is in the transaction 1017 # else remove the obsoleted txmbr 1018 # it clean out some really wierd cases 1019 if not self.tsInfo.exists(pkg.pkgtup): 1020 self.verbose_logger.debug('SKIPBROKEN: Remove extra obsoleted %s (%s)' % (txmbr.po,pkg) ) 1021 self.tsInfo.remove(txmbr.po.pkgtup)
1022
1023 - def _checkUpdatedLeftovers(self):
1024 """ 1025 If multiple packages is updated the same package 1026 and this package get removed because of an dep issue 1027 then make sure that all the TS_UPDATED get removed. 1028 """ 1029 for txmbr in self.tsInfo.getMembersWithState(None, [TS_UPDATED]): 1030 for pkg in txmbr.updated_by: 1031 # check if the updating txmbr is in the transaction 1032 # else remove the updated txmbr 1033 # it clean out some really wierd cases with dupes installed on the system 1034 if not self.tsInfo.exists(pkg.pkgtup): 1035 self.verbose_logger.debug('SKIPBROKEN: Remove extra updated %s (%s)' % (txmbr.po,pkg) ) 1036 self.tsInfo.remove(txmbr.po.pkgtup)
1037
1038 - def _getPackagesToRemoveAllArch(self,po):
1039 ''' get all compatible arch packages in pkgSack''' 1040 pkgs = [] 1041 if self.arch.multilib: 1042 n,a,e,v,r = po.pkgtup 1043 # skip for all compat archs 1044 for a in self.arch.archlist: 1045 pkgtup = (n,a,e,v,r) 1046 matched = self.pkgSack.searchNevra(n,e,v,r,a) 1047 pkgs.extend(matched) 1048 else: 1049 pkgs.append(po) 1050 return pkgs
1051 1052 1053 1054 1055
1056 - def _skipFromTransaction(self,po):
1057 skipped = [] 1058 n,a,e,v,r = po.pkgtup 1059 # skip for all compat archs 1060 for a in self.arch.archlist: 1061 pkgtup = (n,a,e,v,r) 1062 if self.tsInfo.exists(pkgtup): 1063 for txmbr in self.tsInfo.getMembers(pkgtup): 1064 pkg = txmbr.po 1065 skip = self._removePoFromTransaction(pkg) 1066 skipped.extend(skip) 1067 return skipped
1068
1069 - def _removePoFromTransaction(self,po):
1070 skip = [] 1071 if self.tsInfo.exists(po.pkgtup): 1072 self.verbose_logger.debug('SKIPBROKEN: removing %s from transaction' % str(po)) 1073 self.tsInfo.remove(po.pkgtup) 1074 if not po.repoid == 'installed': 1075 skip.append(po) 1076 return skip
1077
1078 - def _buildDepTree(self):
1079 ''' create a dictionary with po and deps ''' 1080 depTree = { } 1081 for txmbr in self.tsInfo: 1082 for dep in txmbr.depends_on: 1083 depTree.setdefault(dep, []).append(txmbr.po) 1084 # self._printDepTree(depTree) 1085 return depTree
1086
1087 - def _printDepTree(self, tree):
1088 for pkg, l in tree.iteritems(): 1089 print pkg 1090 for p in l: 1091 print "\t", p
1092
1093 - def _printTransaction(self):
1094 #transaction set states 1095 state = { TS_UPDATE : "update", 1096 TS_INSTALL : "install", 1097 TS_TRUEINSTALL: "trueinstall", 1098 TS_ERASE : "erase", 1099 TS_OBSOLETED : "obsoleted", 1100 TS_OBSOLETING : "obsoleting", 1101 TS_AVAILABLE : "available", 1102 TS_UPDATED : "updated"} 1103 1104 self.verbose_logger.log(logginglevels.DEBUG_2,"TSINFO: Current Transaction : %i member(s) " % len(self.tsInfo)) 1105 for txmbr in sorted(self.tsInfo): 1106 msg = " %-11s : %s " % (state[txmbr.output_state],txmbr.po) 1107 self.verbose_logger.log(logginglevels.DEBUG_2, msg) 1108 for po,rel in sorted(txmbr.relatedto): 1109 msg = " %s : %s" % (rel,po) 1110 self.verbose_logger.log(logginglevels.DEBUG_2, msg)
1111 1112
1113 - def _getPackagesToRemove(self,po,deptree,toRemove):
1114 ''' 1115 get the (related) pos to remove. 1116 ''' 1117 toRemove.add(po) 1118 for txmbr in self.tsInfo.getMembers(po.pkgtup): 1119 for pkg in (txmbr.updates + txmbr.obsoletes): 1120 toRemove.add(pkg) 1121 self._getDepsToRemove(pkg, deptree, toRemove) 1122 self._getDepsToRemove(po, deptree, toRemove)
1123
1124 - def _getDepsToRemove(self,po, deptree, toRemove):
1125 for dep in deptree.get(po, []): # Loop trough all deps of po 1126 for txmbr in self.tsInfo.getMembers(dep.pkgtup): 1127 for pkg in (txmbr.updates + txmbr.obsoletes): 1128 toRemove.add(pkg) 1129 toRemove.add(dep) 1130 self._getDepsToRemove(dep, deptree, toRemove)
1131
1132 - def _rpmdb_warn_checks(self, out=None, warn=True, chkcmd='all'):
1133 if out is None: 1134 out = self.logger.warning 1135 if warn: 1136 out(_('Warning: RPMDB altered outside of yum.')) 1137 1138 rc = 0 1139 probs = [] 1140 if chkcmd in ('all', 'dependencies'): 1141 prob2ui = {'requires' : _('missing requires'), 1142 'conflicts' : _('installed conflict')} 1143 probs.extend(self.rpmdb.check_dependencies()) 1144 1145 if chkcmd in ('all', 'duplicates'): 1146 iopkgs = set(self.conf.installonlypkgs) 1147 probs.extend(self.rpmdb.check_duplicates(iopkgs)) 1148 1149 for prob in sorted(probs): 1150 out(prob) 1151 1152 return len(probs)
1153
1154 - def runTransaction(self, cb):
1155 """takes an rpm callback object, performs the transaction""" 1156 1157 self.plugins.run('pretrans') 1158 1159 # We may want to put this other places, eventually, but for now it's 1160 # good as long as we get it right for history. 1161 for repo in self.repos.listEnabled(): 1162 if repo._xml2sqlite_local: 1163 self.run_with_package_names.add('yum-metadata-parser') 1164 break 1165 1166 using_pkgs_pats = list(self.run_with_package_names) 1167 using_pkgs = self.rpmdb.returnPackages(patterns=using_pkgs_pats) 1168 rpmdbv = self.rpmdb.simpleVersion(main_only=True)[0] 1169 lastdbv = self.history.last() 1170 if lastdbv is not None: 1171 lastdbv = lastdbv.end_rpmdbversion 1172 if lastdbv is None or rpmdbv != lastdbv: 1173 self._rpmdb_warn_checks(warn=lastdbv is not None) 1174 if self.conf.history_record: 1175 self.history.beg(rpmdbv, using_pkgs, list(self.tsInfo)) 1176 1177 # Just before we update the transaction, update what we think the 1178 # rpmdb will look like. This needs to be done before the run, so that if 1179 # "something" happens and the rpmdb is different from what we think it 1180 # will be we store what we thought, not what happened (so it'll be an 1181 # invalid cache). 1182 self.rpmdb.transactionResultVersion(self.tsInfo.futureRpmDBVersion()) 1183 1184 errors = self.ts.run(cb.callback, '') 1185 # ts.run() exit codes are, hmm, "creative": None means all ok, empty 1186 # list means some errors happened in the transaction and non-empty 1187 # list that there were errors preventing the ts from starting... 1188 1189 # make resultobject - just a plain yumgenericholder object 1190 resultobject = misc.GenericHolder() 1191 resultobject.return_code = 0 1192 if errors is None: 1193 pass 1194 elif len(errors) == 0: 1195 errstring = _('Warning: scriptlet or other non-fatal errors occurred during transaction.') 1196 self.verbose_logger.debug(errstring) 1197 resultobject.return_code = 1 1198 else: 1199 if self.conf.history_record: 1200 herrors = [to_unicode(to_str(x)) for x in errors] 1201 self.history.end(rpmdbv, 2, errors=herrors) 1202 raise Errors.YumBaseError, errors 1203 1204 if not self.conf.keepcache: 1205 self.cleanUsedHeadersPackages() 1206 1207 for i in ('ts_all_fn', 'ts_done_fn'): 1208 if hasattr(cb, i): 1209 fn = getattr(cb, i) 1210 try: 1211 misc.unlink_f(fn) 1212 except (IOError, OSError), e: 1213 self.logger.critical(_('Failed to remove transaction file %s') % fn) 1214 1215 self.rpmdb.dropCachedData() # drop out the rpm cache so we don't step on bad hdr indexes 1216 self.plugins.run('posttrans') 1217 # sync up what just happened versus what is in the rpmdb 1218 self.verifyTransaction(resultobject) 1219 return resultobject
1220
1221 - def verifyTransaction(self, resultobject=None):
1222 """checks that the transaction did what we expected it to do. Also 1223 propagates our external yumdb info""" 1224 1225 # check to see that the rpmdb and the tsInfo roughly matches 1226 # push package object metadata outside of rpmdb into yumdb 1227 # delete old yumdb metadata entries 1228 1229 # for each pkg in the tsInfo 1230 # if it is an install - see that the pkg is installed 1231 # if it is a remove - see that the pkg is no longer installed, provided 1232 # that there is not also an install of this pkg in the tsInfo (reinstall) 1233 # for any kind of install add from_repo to the yumdb, and the cmdline 1234 # and the install reason 1235 1236 self.rpmdb.dropCachedData() 1237 for txmbr in self.tsInfo: 1238 if txmbr.output_state in TS_INSTALL_STATES: 1239 if not self.rpmdb.contains(po=txmbr.po): 1240 # maybe a file log here, too 1241 # but raising an exception is not going to do any good 1242 self.logger.critical(_('%s was supposed to be installed' \ 1243 ' but is not!' % txmbr.po)) 1244 continue 1245 po = self.getInstalledPackageObject(txmbr.pkgtup) 1246 rpo = txmbr.po 1247 po.yumdb_info.from_repo = rpo.repoid 1248 po.yumdb_info.reason = txmbr.reason 1249 po.yumdb_info.releasever = self.conf.yumvar['releasever'] 1250 if hasattr(self, 'cmds') and self.cmds: 1251 po.yumdb_info.command_line = ' '.join(self.cmds) 1252 csum = rpo.returnIdSum() 1253 if csum is not None: 1254 po.yumdb_info.checksum_type = str(csum[0]) 1255 po.yumdb_info.checksum_data = str(csum[1]) 1256 1257 if isinstance(rpo, YumLocalPackage): 1258 try: 1259 st = os.stat(rpo.localPkg()) 1260 lp_ctime = str(int(st.st_ctime)) 1261 lp_mtime = str(int(st.st_mtime)) 1262 po.yumdb_info.from_repo_revision = lp_ctime 1263 po.yumdb_info.from_repo_timestamp = lp_mtime 1264 except: pass 1265 1266 if not hasattr(rpo.repo, 'repoXML'): 1267 continue 1268 1269 md = rpo.repo.repoXML 1270 if md and md.revision is not None: 1271 po.yumdb_info.from_repo_revision = str(md.revision) 1272 if md: 1273 po.yumdb_info.from_repo_timestamp = str(md.timestamp) 1274 1275 elif txmbr.output_state in TS_REMOVE_STATES: 1276 if self.rpmdb.contains(po=txmbr.po): 1277 if not self.tsInfo.getMembersWithState(pkgtup=txmbr.pkgtup, 1278 output_states=TS_INSTALL_STATES): 1279 # maybe a file log here, too 1280 # but raising an exception is not going to do any good 1281 self.logger.critical(_('%s was supposed to be removed' \ 1282 ' but is not!' % txmbr.po)) 1283 continue 1284 yumdb_item = self.rpmdb.yumdb.get_package(po=txmbr.po) 1285 yumdb_item.clean() 1286 else: 1287 self.verbose_logger.log(logginglevels.DEBUG_2, 'What is this? %s' % txmbr.po) 1288 1289 if self.conf.history_record: 1290 ret = -1 1291 if resultobject is not None: 1292 ret = resultobject.return_code 1293 self.history.end(self.rpmdb.simpleVersion(main_only=True)[0], ret) 1294 self.rpmdb.dropCachedData()
1295
1296 - def costExcludePackages(self):
1297 """ Create an excluder for repos. with higher cost. Eg. 1298 repo-A:cost=1 repo-B:cost=2 ... here we setup an excluder on repo-B 1299 that looks for pkgs in repo-B.""" 1300 1301 # if all the repo.costs are equal then don't bother running things 1302 costs = {} 1303 for r in self.repos.listEnabled(): 1304 costs.setdefault(r.cost, []).append(r) 1305 1306 if len(costs) <= 1: 1307 return 1308 1309 done = False 1310 exid = "yum.costexcludes" 1311 orepos = [] 1312 for cost in sorted(costs): 1313 if done: # Skip the first one, as they have lowest cost so are good. 1314 for repo in costs[cost]: 1315 yce = _YumCostExclude(repo, self.repos) 1316 repo.sack.addPackageExcluder(repo.id, exid, 1317 'exclude.pkgtup.in', yce) 1318 orepos.extend(costs[cost]) 1319 done = True
1320
1321 - def excludePackages(self, repo=None):
1322 """removes packages from packageSacks based on global exclude lists, 1323 command line excludes and per-repository excludes, takes optional 1324 repo object to use.""" 1325 1326 if "all" in self.conf.disable_excludes: 1327 return 1328 1329 # if not repo: then assume global excludes, only 1330 # if repo: then do only that repos' packages and excludes 1331 1332 if not repo: # global only 1333 if "main" in self.conf.disable_excludes: 1334 return 1335 excludelist = self.conf.exclude 1336 repoid = None 1337 exid_beg = 'yum.excludepkgs' 1338 else: 1339 if repo.id in self.conf.disable_excludes: 1340 return 1341 excludelist = repo.getExcludePkgList() 1342 repoid = repo.id 1343 exid_beg = 'yum.excludepkgs.' + repoid 1344 1345 count = 0 1346 for match in excludelist: 1347 count += 1 1348 exid = "%s.%u" % (exid_beg, count) 1349 self.pkgSack.addPackageExcluder(repoid, exid,'exclude.match', match)
1350
1351 - def includePackages(self, repo):
1352 """removes packages from packageSacks based on list of packages, to include. 1353 takes repoid as a mandatory argument.""" 1354 1355 includelist = repo.getIncludePkgList() 1356 1357 if len(includelist) == 0: 1358 return 1359 1360 # includepkgs actually means "exclude everything that doesn't match". 1361 # So we mark everything, then wash those we want to keep and then 1362 # exclude everything that is marked. 1363 exid = "yum.includepkgs.1" 1364 self.pkgSack.addPackageExcluder(repo.id, exid, 'mark.washed') 1365 count = 0 1366 for match in includelist: 1367 count += 1 1368 exid = "%s.%u" % ("yum.includepkgs.2", count) 1369 self.pkgSack.addPackageExcluder(repo.id, exid, 'wash.match', match) 1370 exid = "yum.includepkgs.3" 1371 self.pkgSack.addPackageExcluder(repo.id, exid, 'exclude.marked')
1372
1373 - def doLock(self, lockfile = YUM_PID_FILE):
1374 """perform the yum locking, raise yum-based exceptions, not OSErrors""" 1375 1376 # if we're not root then we don't lock - just return nicely 1377 if self.conf.uid != 0: 1378 return 1379 1380 root = self.conf.installroot 1381 lockfile = root + '/' + lockfile # lock in the chroot 1382 lockfile = os.path.normpath(lockfile) # get rid of silly preceding extra / 1383 1384 mypid=str(os.getpid()) 1385 while not self._lock(lockfile, mypid, 0644): 1386 fd = open(lockfile, 'r') 1387 try: oldpid = int(fd.readline()) 1388 except ValueError: 1389 # bogus data in the pid file. Throw away. 1390 self._unlock(lockfile) 1391 else: 1392 if oldpid == os.getpid(): # if we own the lock, we're fine 1393 break 1394 try: os.kill(oldpid, 0) 1395 except OSError, e: 1396 if e[0] == errno.ESRCH: 1397 # The pid doesn't exist 1398 self._unlock(lockfile) 1399 else: 1400 # Whoa. What the heck happened? 1401 msg = _('Unable to check if PID %s is active') % oldpid 1402 raise Errors.LockError(1, msg, oldpid) 1403 else: 1404 # Another copy seems to be running. 1405 msg = _('Existing lock %s: another copy is running as pid %s.') % (lockfile, oldpid) 1406 raise Errors.LockError(0, msg, oldpid) 1407 # We've got the lock, store it so we can auto-unlock on __del__... 1408 self._lockfile = lockfile
1409
1410 - def doUnlock(self, lockfile=None):
1411 """do the unlock for yum""" 1412 1413 # if we're not root then we don't lock - just return nicely 1414 # Note that we can get here from __del__, so if we haven't created 1415 # YumBase.conf we don't want to do so here as creating stuff inside 1416 # __del__ is bad. 1417 if hasattr(self, 'preconf') or self.conf.uid != 0: 1418 return 1419 1420 if lockfile is not None: 1421 root = self.conf.installroot 1422 lockfile = root + '/' + lockfile # lock in the chroot 1423 elif self._lockfile is None: 1424 return # Don't delete other people's lock files on __del__ 1425 else: 1426 lockfile = self._lockfile # Get the value we locked with 1427 1428 self._unlock(lockfile) 1429 self._lockfile = None
1430
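An illustrative locking sketch based on doLock()/doUnlock() above; doLock() raises Errors.LockError when another instance already holds the pid file (YUM_PID_FILE), and is a no-op for non-root users:

    yb = yum.YumBase()
    try:
        yb.doLock()
    except yum.Errors.LockError, e:
        print "another copy of yum is running:", e
    else:
        try:
            pass                       # ... work with yb ...
        finally:
            yb.doUnlock()              # __del__() would also unlock, but be explicit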
1431 - def _lock(self, filename, contents='', mode=0777):
1432 lockdir = os.path.dirname(filename) 1433 try: 1434 if not os.path.exists(lockdir): 1435 os.makedirs(lockdir, mode=0755) 1436 fd = os.open(filename, os.O_EXCL|os.O_CREAT|os.O_WRONLY, mode) 1437 except OSError, msg: 1438 if not msg.errno == errno.EEXIST: 1439 # Whoa. What the heck happened? 1440 errmsg = _('Could not create lock at %s: %s ') % (filename, str(msg)) 1441 raise Errors.LockError(msg.errno, errmsg, contents) 1442 return 0 1443 else: 1444 os.write(fd, contents) 1445 os.close(fd) 1446 return 1
1447
1448 - def _unlock(self, filename):
1449 misc.unlink_f(filename)
1450
1451 - def verifyPkg(self, fo, po, raiseError):
1452 """verifies the package is what we expect it to be 1453 raiseError = defaults to 0 - if 1 then will raise 1454 a URLGrabError if the file does not check out. 1455 otherwise it returns false for a failure, true for success""" 1456 failed = False 1457 1458 if type(fo) is types.InstanceType: 1459 fo = fo.filename 1460 1461 if fo != po.localPkg(): 1462 po.localpath = fo 1463 1464 if not po.verifyLocalPkg(): 1465 failed = True 1466 else: 1467 ylp = YumLocalPackage(self.rpmdb.readOnlyTS(), fo) 1468 if ylp.pkgtup != po.pkgtup: 1469 failed = True 1470 1471 1472 if failed: 1473 # if the file is wrong AND it is >= what we expected then it 1474 # can't be redeemed. If we can, kill it and start over fresh 1475 cursize = os.stat(fo)[6] 1476 totsize = long(po.size) 1477 if cursize >= totsize and not po.repo.cache: 1478 # if the path to the file is NOT inside the cachedir then don't 1479 # unlink it b/c it is probably a file:// url and possibly 1480 # unlinkable 1481 if fo.startswith(po.repo.cachedir): 1482 os.unlink(fo) 1483 1484 if raiseError: 1485 msg = _('Package does not match intended download. Suggestion: run yum clean metadata') 1486 raise URLGrabError(-1, msg) 1487 else: 1488 return False 1489 1490 1491 return True
1492 1493
1494 - def verifyChecksum(self, fo, checksumType, csum):
1495 """Verify the checksum of the file versus the 1496 provided checksum""" 1497 1498 try: 1499 filesum = misc.checksum(checksumType, fo) 1500 except Errors.MiscError, e: 1501 raise URLGrabError(-3, _('Could not perform checksum')) 1502 1503 if filesum != csum: 1504 raise URLGrabError(-1, _('Package does not match checksum')) 1505 1506 return 0
1507
1508 - def downloadPkgs(self, pkglist, callback=None, callback_total=None):
1509 def mediasort(apo, bpo): 1510 # FIXME: we should probably also use the mediaid; else we 1511 # could conceivably ping-pong between different disc1's 1512 a = apo.getDiscNum() 1513 b = bpo.getDiscNum() 1514 if a is None and b is None: 1515 return cmp(apo, bpo) 1516 if a is None: 1517 return -1 1518 if b is None: 1519 return 1 1520 if a < b: 1521 return -1 1522 elif a > b: 1523 return 1 1524 return 0
1525 1526 """download list of package objects handed to you, output based on 1527 callback, raise yum.Errors.YumBaseError on problems""" 1528 1529 errors = {} 1530 def adderror(po, msg): 1531 errors.setdefault(po, []).append(msg) 1532 1533 # We close the history DB here because some plugins (presto) use 1534 # threads. And sqlite really doesn't like threads. And while I don't 1535 # think it should matter, we've had some reports of history DB 1536 # corruption, and it was implied that it happened just after C-c 1537 # at download time and this is a safe thing to do. 1538 # Note that manual testing shows that history is not connected by 1539 # this point, from the cli with no plugins. So this really does 1540 # nothing *sigh*. 1541 self.history.close() 1542 1543 self.plugins.run('predownload', pkglist=pkglist) 1544 repo_cached = False 1545 remote_pkgs = [] 1546 remote_size = 0 1547 for po in pkglist: 1548 if hasattr(po, 'pkgtype') and po.pkgtype == 'local': 1549 continue 1550 1551 local = po.localPkg() 1552 if os.path.exists(local): 1553 if not self.verifyPkg(local, po, False): 1554 if po.repo.cache: 1555 repo_cached = True 1556 adderror(po, _('package fails checksum but caching is ' 1557 'enabled for %s') % po.repo.id) 1558 else: 1559 self.verbose_logger.debug(_("using local copy of %s") %(po,)) 1560 continue 1561 1562 remote_pkgs.append(po) 1563 remote_size += po.size 1564 1565 # caching is enabled and the package 1566 # just failed to check out there's no 1567 # way to save this, report the error and return 1568 if (self.conf.cache or repo_cached) and errors: 1569 return errors 1570 1571 1572 remote_pkgs.sort(mediasort) 1573 # This is kind of a hack and does nothing in non-Fedora versions, 1574 # we'll fix it one way or anther soon. 1575 if (hasattr(urlgrabber.progress, 'text_meter_total_size') and 1576 len(remote_pkgs) > 1): 1577 urlgrabber.progress.text_meter_total_size(remote_size) 1578 beg_download = time.time() 1579 i = 0 1580 local_size = 0 1581 for po in remote_pkgs: 1582 # Recheck if the file is there, works around a couple of weird 1583 # edge cases. 
1584 local = po.localPkg() 1585 i += 1 1586 if os.path.exists(local): 1587 if self.verifyPkg(local, po, False): 1588 self.verbose_logger.debug(_("using local copy of %s") %(po,)) 1589 remote_size -= po.size 1590 if hasattr(urlgrabber.progress, 'text_meter_total_size'): 1591 urlgrabber.progress.text_meter_total_size(remote_size, 1592 local_size) 1593 continue 1594 if os.path.getsize(local) >= po.size: 1595 os.unlink(local) 1596 1597 checkfunc = (self.verifyPkg, (po, 1), {}) 1598 dirstat = os.statvfs(po.repo.pkgdir) 1599 if (dirstat.f_bavail * dirstat.f_bsize) <= long(po.size): 1600 adderror(po, _('Insufficient space in download directory %s\n' 1601 " * free %s\n" 1602 " * needed %s") % 1603 (po.repo.pkgdir, 1604 format_number(dirstat.f_bavail * dirstat.f_bsize), 1605 format_number(po.size))) 1606 continue 1607 1608 try: 1609 if i == 1 and not local_size and remote_size == po.size: 1610 text = os.path.basename(po.relativepath) 1611 else: 1612 text = '(%s/%s): %s' % (i, len(remote_pkgs), 1613 os.path.basename(po.relativepath)) 1614 mylocal = po.repo.getPackage(po, 1615 checkfunc=checkfunc, 1616 text=text, 1617 cache=po.repo.http_caching != 'none', 1618 ) 1619 local_size += po.size 1620 if hasattr(urlgrabber.progress, 'text_meter_total_size'): 1621 urlgrabber.progress.text_meter_total_size(remote_size, 1622 local_size) 1623 except Errors.RepoError, e: 1624 adderror(po, str(e)) 1625 else: 1626 po.localpath = mylocal 1627 if po in errors: 1628 del errors[po] 1629 1630 if hasattr(urlgrabber.progress, 'text_meter_total_size'): 1631 urlgrabber.progress.text_meter_total_size(0) 1632 if callback_total is not None and not errors: 1633 callback_total(remote_pkgs, remote_size, beg_download) 1634 1635 self.plugins.run('postdownload', pkglist=pkglist, errors=errors) 1636 1637 return errors 1638
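downloadPkgs() returns a dict keyed by package object, each value being a list of error strings; an empty dict means every requested package is now available via po.localPkg(). A short sketch of consuming that result:

    errors = yb.downloadPkgs(pkglist)      # pkglist: package objects to fetch
    for po, msgs in errors.items():
        for msg in msgs:
            print "%s: %s" % (po, msg)
    if not errors:
        paths = [po.localPkg() for po in pkglist]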
1639 - def verifyHeader(self, fo, po, raiseError):
1640          """check the header out via its nevra, internally""" 1641          if type(fo) is types.InstanceType: 1642              fo = fo.filename 1643           1644          try: 1645              hlist = rpm.readHeaderListFromFile(fo) 1646              hdr = hlist[0] 1647          except (rpm.error, IndexError): 1648              if raiseError: 1649                  raise URLGrabError(-1, _('Header is not complete.')) 1650              else: 1651                  return 0 1652           1653          yip = YumInstalledPackage(hdr) # we're using YumInstalledPackage b/c 1654                                         # it takes headers <shrug> 1655          if yip.pkgtup != po.pkgtup: 1656              if raiseError: 1657                  raise URLGrabError(-1, 'Header does not match intended download') 1658              else: 1659                  return 0 1660           1661          return 1
1662
1663 - def downloadHeader(self, po):
1664 """download a header from a package object. 1665 output based on callback, raise yum.Errors.YumBaseError on problems""" 1666 1667 if hasattr(po, 'pkgtype') and po.pkgtype == 'local': 1668 return 1669 1670 errors = {} 1671 local = po.localHdr() 1672 repo = self.repos.getRepo(po.repoid) 1673 if os.path.exists(local): 1674 try: 1675 result = self.verifyHeader(local, po, raiseError=1) 1676 except URLGrabError, e: 1677 # might add a check for length of file - if it is < 1678 # required doing a reget 1679 misc.unlink_f(local) 1680 else: 1681 po.hdrpath = local 1682 return 1683 else: 1684 if self.conf.cache: 1685 raise Errors.RepoError, \ 1686 _('Header not in local cache and caching-only mode enabled. Cannot download %s') % po.hdrpath 1687 1688 if self.dsCallback: self.dsCallback.downloadHeader(po.name) 1689 1690 try: 1691 if not os.path.exists(repo.hdrdir): 1692 os.makedirs(repo.hdrdir) 1693 checkfunc = (self.verifyHeader, (po, 1), {}) 1694 hdrpath = repo.getHeader(po, checkfunc=checkfunc, 1695 cache=repo.http_caching != 'none', 1696 ) 1697 except Errors.RepoError, e: 1698 saved_repo_error = e 1699 try: 1700 misc.unlink_f(local) 1701 except OSError, e: 1702 raise Errors.RepoError, saved_repo_error 1703 else: 1704 raise Errors.RepoError, saved_repo_error 1705 else: 1706 po.hdrpath = hdrpath 1707 return
1708
1709 - def sigCheckPkg(self, po):
1710 ''' 1711 Take a package object and attempt to verify GPG signature if required 1712 1713 Returns (result, error_string) where result is: 1714 - 0 - GPG signature verifies ok or verification is not required. 1715 - 1 - GPG verification failed but installation of the right GPG key 1716 might help. 1717 - 2 - Fatal GPG verification error, give up. 1718 ''' 1719 if hasattr(po, 'pkgtype') and po.pkgtype == 'local': 1720 check = self.conf.gpgcheck 1721 hasgpgkey = 0 1722 else: 1723 repo = self.repos.getRepo(po.repoid) 1724 check = repo.gpgcheck 1725 hasgpgkey = not not repo.gpgkey 1726 1727 if check: 1728 ts = self.rpmdb.readOnlyTS() 1729 sigresult = rpmUtils.miscutils.checkSig(ts, po.localPkg()) 1730 localfn = os.path.basename(po.localPkg()) 1731 1732 if sigresult == 0: 1733 result = 0 1734 msg = '' 1735 1736 elif sigresult == 1: 1737 if hasgpgkey: 1738 result = 1 1739 else: 1740 result = 2 1741 msg = _('Public key for %s is not installed') % localfn 1742 1743 elif sigresult == 2: 1744 result = 2 1745 msg = _('Problem opening package %s') % localfn 1746 1747 elif sigresult == 3: 1748 if hasgpgkey: 1749 result = 1 1750 else: 1751 result = 2 1752 result = 1 1753 msg = _('Public key for %s is not trusted') % localfn 1754 1755 elif sigresult == 4: 1756 result = 2 1757 msg = _('Package %s is not signed') % localfn 1758 1759 else: 1760 result =0 1761 msg = '' 1762 1763 return result, msg
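A short sketch of acting on the 0/1/2 result codes documented above, assuming a configured yum.YumBase and packages that have already been fetched locally (the pattern is only an example):

import yum

yb = yum.YumBase()
pkgs = yb.pkgSack.returnNewestByNameArch(patterns=['bash'])
yb.downloadPkgs(pkgs)                 # sigCheckPkg() inspects po.localPkg()
for po in pkgs:
    result, msg = yb.sigCheckPkg(po)
    if result == 0:
        print('%s: signature ok (or checking not required)' % po)
    elif result == 1:
        print('%s: %s (importing the repo GPG key may fix this)' % (po, msg))
    else:
        print('%s: fatal signature problem: %s' % (po, msg))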
1764
1765 - def cleanUsedHeadersPackages(self):
1766 filelist = [] 1767 for txmbr in self.tsInfo: 1768 if txmbr.po.state not in TS_INSTALL_STATES: 1769 continue 1770 if txmbr.po.repoid == "installed": 1771 continue 1772 if txmbr.po.repoid not in self.repos.repos: 1773 continue 1774 1775 # make sure it's not a local file 1776 repo = self.repos.repos[txmbr.po.repoid] 1777 local = False 1778 for u in repo.baseurl: 1779 if u.startswith("file:"): 1780 local = True 1781 break 1782 1783 if local: 1784 filelist.extend([txmbr.po.localHdr()]) 1785 else: 1786 filelist.extend([txmbr.po.localPkg(), txmbr.po.localHdr()]) 1787 1788 # now remove them 1789 for fn in filelist: 1790 if not os.path.exists(fn): 1791 continue 1792 try: 1793 misc.unlink_f(fn) 1794 except OSError, e: 1795 self.logger.warning(_('Cannot remove %s'), fn) 1796 continue 1797 else: 1798 self.verbose_logger.log(logginglevels.DEBUG_4, 1799 _('%s removed'), fn)
1800
1801 - def cleanHeaders(self):
1802 exts = ['hdr'] 1803 return self._cleanFiles(exts, 'hdrdir', 'header')
1804
1805 - def cleanPackages(self):
1806 exts = ['rpm'] 1807 return self._cleanFiles(exts, 'pkgdir', 'package')
1808
1809 - def cleanSqlite(self):
1810 exts = ['sqlite', 'sqlite.bz2'] 1811 return self._cleanFiles(exts, 'cachedir', 'sqlite')
1812
1813 - def cleanMetadata(self):
1814 exts = ['xml.gz', 'xml', 'cachecookie', 'mirrorlist.txt', 'asc'] 1815 # Metalink is also here, but is a *.xml file 1816 return self._cleanFiles(exts, 'cachedir', 'metadata')
1817
1818 - def cleanExpireCache(self):
1819 exts = ['cachecookie', 'mirrorlist.txt'] 1820 return self._cleanFiles(exts, 'cachedir', 'metadata')
1821
1822 - def cleanRpmDB(self):
1823 cachedir = self.conf.cachedir + "/installed/" 1824 if not os.path.exists(cachedir): 1825 filelist = [] 1826 else: 1827 filelist = misc.getFileList(cachedir, '', []) 1828 return self._cleanFilelist('rpmdb', filelist)
1829
1830 - def _cleanFiles(self, exts, pathattr, filetype):
1831 filelist = [] 1832 for ext in exts: 1833 for repo in self.repos.listEnabled(): 1834 path = getattr(repo, pathattr) 1835 if os.path.exists(path) and os.path.isdir(path): 1836 filelist = misc.getFileList(path, ext, filelist) 1837 return self._cleanFilelist(filetype, filelist)
1838
1839 - def _cleanFilelist(self, filetype, filelist):
1840 removed = 0 1841 for item in filelist: 1842 try: 1843 misc.unlink_f(item) 1844 except OSError, e: 1845 self.logger.critical(_('Cannot remove %s file %s'), filetype, item) 1846 continue 1847 else: 1848 self.verbose_logger.log(logginglevels.DEBUG_4, 1849 _('%s file %s removed'), filetype, item) 1850 removed+=1 1851 msg = _('%d %s files removed') % (removed, filetype) 1852 return 0, [msg]
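The public clean*() helpers above all funnel into _cleanFiles()/_cleanFilelist() and hand back a (code, [message]) pair. A rough sketch of calling them, assuming a configured yum.YumBase and write access to the cache directories:

import yum

yb = yum.YumBase()
for clean in (yb.cleanPackages, yb.cleanHeaders, yb.cleanMetadata,
              yb.cleanSqlite, yb.cleanExpireCache):
    code, msgs = clean()
    print(msgs[0])        # e.g. "12 package files removed"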
1853
1854 - def doPackageLists(self, pkgnarrow='all', patterns=None, showdups=None, 1855 ignore_case=False):
1856 """generates lists of packages, un-reduced, based on pkgnarrow option""" 1857 1858 if showdups is None: 1859 showdups = self.conf.showdupesfromrepos 1860 ygh = misc.GenericHolder(iter=pkgnarrow) 1861 1862 installed = [] 1863 available = [] 1864 reinstall_available = [] 1865 old_available = [] 1866 updates = [] 1867 obsoletes = [] 1868 obsoletesTuples = [] 1869 recent = [] 1870 extras = [] 1871 1872 ic = ignore_case 1873 # list all packages - those installed and available, don't 'think about it' 1874 if pkgnarrow == 'all': 1875 dinst = {} 1876 ndinst = {} # Newest versions by name.arch 1877 for po in self.rpmdb.returnPackages(patterns=patterns, 1878 ignore_case=ic): 1879 dinst[po.pkgtup] = po 1880 if showdups: 1881 continue 1882 key = (po.name, po.arch) 1883 if key not in ndinst or po.verGT(ndinst[key]): 1884 ndinst[key] = po 1885 installed = dinst.values() 1886 1887 if showdups: 1888 avail = self.pkgSack.returnPackages(patterns=patterns, 1889 ignore_case=ic) 1890 else: 1891 try: 1892 avail = self.pkgSack.returnNewestByNameArch(patterns=patterns, 1893 ignore_case=ic) 1894 except Errors.PackageSackError: 1895 avail = [] 1896 1897 for pkg in avail: 1898 if showdups: 1899 if pkg.pkgtup in dinst: 1900 reinstall_available.append(pkg) 1901 else: 1902 available.append(pkg) 1903 else: 1904 key = (pkg.name, pkg.arch) 1905 if pkg.pkgtup in dinst: 1906 reinstall_available.append(pkg) 1907 elif key not in ndinst or pkg.verGT(ndinst[key]): 1908 available.append(pkg) 1909 else: 1910 old_available.append(pkg) 1911 1912 # produce the updates list of tuples 1913 elif pkgnarrow == 'updates': 1914 for (n,a,e,v,r) in self.up.getUpdatesList(): 1915 matches = self.pkgSack.searchNevra(name=n, arch=a, epoch=e, 1916 ver=v, rel=r) 1917 if len(matches) > 1: 1918 updates.append(matches[0]) 1919 self.verbose_logger.log(logginglevels.DEBUG_1, 1920 _('More than one identical match in sack for %s'), 1921 matches[0]) 1922 elif len(matches) == 1: 1923 updates.append(matches[0]) 1924 else: 1925 self.verbose_logger.log(logginglevels.DEBUG_1, 1926 _('Nothing matches %s.%s %s:%s-%s from update'), n,a,e,v,r) 1927 if patterns: 1928 exactmatch, matched, unmatched = \ 1929 parsePackages(updates, patterns, casematch=not ignore_case) 1930 updates = exactmatch + matched 1931 1932 # installed only 1933 elif pkgnarrow == 'installed': 1934 installed = self.rpmdb.returnPackages(patterns=patterns, 1935 ignore_case=ic) 1936 1937 # available in a repository 1938 elif pkgnarrow == 'available': 1939 1940 if showdups: 1941 avail = self.pkgSack.returnPackages(patterns=patterns, 1942 ignore_case=ic) 1943 else: 1944 try: 1945 avail = self.pkgSack.returnNewestByNameArch(patterns=patterns, 1946 ignore_case=ic) 1947 except Errors.PackageSackError: 1948 avail = [] 1949 1950 for pkg in avail: 1951 if showdups: 1952 if self.rpmdb.contains(po=pkg): 1953 reinstall_available.append(pkg) 1954 else: 1955 available.append(pkg) 1956 else: 1957 ipkgs = self.rpmdb.searchNevra(pkg.name, arch=pkg.arch) 1958 if ipkgs: 1959 latest = sorted(ipkgs, reverse=True)[0] 1960 if not ipkgs or pkg.verGT(latest): 1961 available.append(pkg) 1962 elif pkg.verEQ(latest): 1963 reinstall_available.append(pkg) 1964 else: 1965 old_available.append(pkg) 1966 1967 # not in a repo but installed 1968 elif pkgnarrow == 'extras': 1969 # we must compare the installed set versus the repo set 1970 # anything installed but not in a repo is an extra 1971 avail = self.pkgSack.simplePkgList(patterns=patterns, 1972 ignore_case=ic) 1973 avail = set(avail) 1974 for po in 
self.rpmdb.returnPackages(patterns=patterns, 1975 ignore_case=ic): 1976 if po.pkgtup not in avail: 1977 extras.append(po) 1978 1979 # obsoleting packages (and what they obsolete) 1980 elif pkgnarrow == 'obsoletes': 1981 self.conf.obsoletes = 1 1982 1983 for (pkgtup, instTup) in self.up.getObsoletesTuples(): 1984 (n,a,e,v,r) = pkgtup 1985 pkgs = self.pkgSack.searchNevra(name=n, arch=a, ver=v, rel=r, epoch=e) 1986 instpo = self.getInstalledPackageObject(instTup) 1987 for po in pkgs: 1988 obsoletes.append(po) 1989 obsoletesTuples.append((po, instpo)) 1990 if patterns: 1991 exactmatch, matched, unmatched = \ 1992 parsePackages(obsoletes, patterns, casematch=not ignore_case) 1993 obsoletes = exactmatch + matched 1994 matched_obsoletes = set(obsoletes) 1995 nobsoletesTuples = [] 1996 for po, instpo in obsoletesTuples: 1997 if po not in matched_obsoletes: 1998 continue 1999 nobsoletesTuples.append((po, instpo)) 2000 obsoletesTuples = nobsoletesTuples 2001 if not showdups: 2002 obsoletes = packagesNewestByName(obsoletes) 2003 filt = set(obsoletes) 2004 nobsoletesTuples = [] 2005 for po, instpo in obsoletesTuples: 2006 if po not in filt: 2007 continue 2008 nobsoletesTuples.append((po, instpo)) 2009 obsoletesTuples = nobsoletesTuples 2010 2011 # packages recently added to the repositories 2012 elif pkgnarrow == 'recent': 2013 now = time.time() 2014 recentlimit = now-(self.conf.recent*86400) 2015 ftimehash = {} 2016 if showdups: 2017 avail = self.pkgSack.returnPackages(patterns=patterns, 2018 ignore_case=ic) 2019 else: 2020 try: 2021 avail = self.pkgSack.returnNewestByNameArch(patterns=patterns, 2022 ignore_case=ic) 2023 except Errors.PackageSackError: 2024 avail = [] 2025 2026 for po in avail: 2027 ftime = int(po.filetime) 2028 if ftime > recentlimit: 2029 if not ftimehash.has_key(ftime): 2030 ftimehash[ftime] = [po] 2031 else: 2032 ftimehash[ftime].append(po) 2033 2034 for sometime in ftimehash: 2035 for po in ftimehash[sometime]: 2036 recent.append(po) 2037 2038 2039 ygh.installed = installed 2040 ygh.available = available 2041 ygh.reinstall_available = reinstall_available 2042 ygh.old_available = old_available 2043 ygh.updates = updates 2044 ygh.obsoletes = obsoletes 2045 ygh.obsoletesTuples = obsoletesTuples 2046 ygh.recent = recent 2047 ygh.extras = extras 2048 2049 return ygh
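As a usage sketch (assuming a configured yum.YumBase; the pattern is only an example), doPackageLists() is driven by the pkgnarrow keyword and returns a GenericHolder whose attributes match the lists populated above:

import yum

yb = yum.YumBase()

ygh = yb.doPackageLists(pkgnarrow='updates')
for po in sorted(ygh.updates):
    print('update available: %s' % po)

ygh = yb.doPackageLists(pkgnarrow='installed', patterns=['kernel*'],
                        ignore_case=True)
for po in sorted(ygh.installed):
    print('installed: %s' % po)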
2050 2051 2052
2053 - def findDeps(self, pkgs):
2054 """ 2055 Return the dependencies for a given package object list, as well 2056 possible solutions for those dependencies. 2057 2058 Returns the deps as a dict of dicts:: 2059 packageobject = [reqs] = [list of satisfying pkgs] 2060 """ 2061 2062 results = {} 2063 2064 for pkg in pkgs: 2065 results[pkg] = {} 2066 reqs = pkg.requires 2067 reqs.sort() 2068 pkgresults = results[pkg] # shorthand so we don't have to do the 2069 # double bracket thing 2070 2071 for req in reqs: 2072 (r,f,v) = req 2073 if r.startswith('rpmlib('): 2074 continue 2075 2076 satisfiers = [] 2077 2078 for po in self.whatProvides(r, f, v): 2079 satisfiers.append(po) 2080 2081 pkgresults[req] = satisfiers 2082 2083 return results
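A short sketch of walking the dict-of-dicts that findDeps() returns, assuming a configured yum.YumBase (the pattern is only an example); each inner key is a requirement tuple and each value is the list of packages satisfying it:

import yum

yb = yum.YumBase()
pkgs = yb.pkgSack.returnNewestByNameArch(patterns=['bash'])
deps = yb.findDeps(pkgs)
for po in deps:
    for req, providers in deps[po].items():
        # req is a (name, flags, (epoch, version, release)) tuple
        print('%s requires %s: %d provider(s)' % (po, req[0], len(providers)))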
2084 2085 # pre 3.2.10 API used to always showdups, so that's the default atm.
2086 - def searchGenerator(self, fields, criteria, showdups=True, keys=False):
2087 """Generator method to lighten memory load for some searches. 2088 This is the preferred search function to use. Setting keys to True 2089 will use the search keys that matched in the sorting, and return 2090 the search keys in the results. """ 2091 sql_fields = [] 2092 for f in fields: 2093 if RPM_TO_SQLITE.has_key(f): 2094 sql_fields.append(RPM_TO_SQLITE[f]) 2095 else: 2096 sql_fields.append(f) 2097 2098 # yield the results in order of most terms matched first 2099 sorted_lists = {} # count_of_matches = [(pkgobj, 2100 # [search strings which matched], 2101 # [results that matched])] 2102 tmpres = [] 2103 real_crit = [] 2104 real_crit_lower = [] # Take the s.lower()'s out of the loop 2105 rcl2c = {} 2106 # weigh terms in given order (earlier = more relevant) 2107 critweight = 0 2108 critweights = {} 2109 for s in criteria: 2110 real_crit.append(s) 2111 real_crit_lower.append(s.lower()) 2112 rcl2c[s.lower()] = s 2113 critweights.setdefault(s, critweight) 2114 critweight -= 1 2115 2116 for sack in self.pkgSack.sacks.values(): 2117 tmpres.extend(sack.searchPrimaryFieldsMultipleStrings(sql_fields, real_crit)) 2118 2119 def results2sorted_lists(tmpres, sorted_lists): 2120 for (po, count) in tmpres: 2121 # check the pkg for sanity 2122 # pop it into the sorted lists 2123 tmpkeys = set() 2124 tmpvalues = [] 2125 if count not in sorted_lists: sorted_lists[count] = [] 2126 for s in real_crit_lower: 2127 for field in fields: 2128 value = to_unicode(getattr(po, field)) 2129 if value and value.lower().find(s) != -1: 2130 tmpvalues.append(value) 2131 tmpkeys.add(rcl2c[s]) 2132 2133 if len(tmpvalues) > 0: 2134 sorted_lists[count].append((po, tmpkeys, tmpvalues))
2135 results2sorted_lists(tmpres, sorted_lists) 2136 2137 tmpres = self.rpmdb.searchPrimaryFieldsMultipleStrings(fields, 2138 real_crit_lower, 2139 lowered=True) 2140 # close our rpmdb connection so we can ctrl-c, kthxbai 2141 self.closeRpmDB() 2142 2143 results2sorted_lists(tmpres, sorted_lists) 2144 del tmpres 2145 2146 tmpres = self.searchPackageTags(real_crit_lower) 2147 2148 results_by_pkg = {} # pkg=[list_of_tuples_of_values] 2149 2150 for pkg in tmpres: 2151 count = 0 2152 matchkeys = [] 2153 tagresults = [] 2154 for (match, taglist) in tmpres[pkg]: 2155 count += len(taglist) 2156 matchkeys.append(rcl2c[match]) 2157 tagresults.extend(taglist) 2158 if pkg not in results_by_pkg: 2159 results_by_pkg[pkg] = [] 2160 results_by_pkg[pkg].append((matchkeys, tagresults)) 2161 2162 del tmpres 2163 2164 # do the ones we already have 2165 for item in sorted_lists.values(): 2166 for pkg, keys, values in item: 2167 if pkg not in results_by_pkg: 2168 results_by_pkg[pkg] = [] 2169 results_by_pkg[pkg].append((keys,values)) 2170 2171 # take our existing dict-by-pkg and make the dict-by-count for 2172 # this bizarro sorted_lists format 2173 # FIXME - stab sorted_lists in the chest at some later date 2174 sorted_lists = {} 2175 for pkg in results_by_pkg: 2176 totkeys = [] 2177 totvals = [] 2178 for (keys, values) in results_by_pkg[pkg]: 2179 totkeys.extend(keys) 2180 totvals.extend(values) 2181 2182 totkeys = misc.unique(totkeys) 2183 totvals = misc.unique(totvals) 2184 count = len(totkeys) 2185 if count not in sorted_lists: 2186 sorted_lists[count] = [] 2187 sorted_lists[count].append((pkg, totkeys, totvals)) 2188 2189 # By default just sort using package sorting 2190 sort_func = operator.itemgetter(0) 2191 if keys: 2192 # Take into account the keys found, their original order, 2193 # and number of fields hit as well 2194 sort_func = lambda x: (-sum((critweights[y] for y in x[1])), 2195 "\0".join(sorted(x[1])), -len(x[2]), x[0]) 2196 yielded = {} 2197 for val in reversed(sorted(sorted_lists)): 2198 for (po, ks, vs) in sorted(sorted_lists[val], key=sort_func): 2199 if not showdups and (po.name, po.arch) in yielded: 2200 continue 2201 2202 if keys: 2203 yield (po, ks, vs) 2204 else: 2205 yield (po, vs) 2206 2207 if not showdups: 2208 yielded[(po.name, po.arch)] = 1 2209
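A minimal sketch of consuming the generator above with keys=True, assuming a configured yum.YumBase (the field names and search terms are only examples); results are yielded best match first, as described in the docstring:

import yum

yb = yum.YumBase()
for (po, matched_keys, matched_values) in yb.searchGenerator(
        ['name', 'summary'], ['editor', 'vim'], showdups=False, keys=True):
    print('%s  (matched: %s)' % (po, ', '.join(sorted(matched_keys))))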
2210 - def searchPackageTags(self, criteria):
2211 results = {} # name = [(criteria, taglist)] 2212 for c in criteria: 2213 c = c.lower() 2214 res = self.pkgtags.search_tags(c) 2215 for (name, taglist) in res.items(): 2216 pkgs = self.pkgSack.searchNevra(name=name) 2217 if not pkgs: 2218 continue 2219 pkg = pkgs[0] 2220 if pkg not in results: 2221 results[pkg] = [] 2222 results[pkg].append((c, taglist)) 2223 2224 return results
2225
2226 - def searchPackages(self, fields, criteria, callback=None):
2227 """Search specified fields for matches to criteria 2228 optional callback specified to print out results 2229 as you go. Callback is a simple function of: 2230 callback(po, matched values list). It will 2231 just return a dict of dict[po]=matched values list""" 2232 warnings.warn(_('searchPackages() will go away in a future version of Yum.\ 2233 Use searchGenerator() instead. \n'), 2234 Errors.YumFutureDeprecationWarning, stacklevel=2) 2235 matches = {} 2236 match_gen = self.searchGenerator(fields, criteria) 2237 2238 for (po, matched_strings) in match_gen: 2239 if callback: 2240 callback(po, matched_strings) 2241 if not matches.has_key(po): 2242 matches[po] = [] 2243 2244 matches[po].extend(matched_strings) 2245 2246 return matches
2247
2248 - def searchPackageProvides(self, args, callback=None, 2249 callback_has_matchfor=False):
2250 2251 matches = {} 2252 for arg in args: 2253 arg = to_unicode(arg) 2254 if not misc.re_glob(arg): 2255 isglob = False 2256 if arg[0] != '/': 2257 canBeFile = False 2258 else: 2259 canBeFile = True 2260 else: 2261 isglob = True 2262 canBeFile = misc.re_filename(arg) 2263 2264 if not isglob: 2265 usedDepString = True 2266 where = self.returnPackagesByDep(arg) 2267 else: 2268 usedDepString = False 2269 where = self.pkgSack.searchAll(arg, False) 2270 self.verbose_logger.log(logginglevels.DEBUG_1, 2271 _('Searching %d packages'), len(where)) 2272 2273 for po in where: 2274 self.verbose_logger.log(logginglevels.DEBUG_2, 2275 _('searching package %s'), po) 2276 tmpvalues = [] 2277 2278 if usedDepString: 2279 tmpvalues.append(arg) 2280 2281 if not isglob and canBeFile: 2282 # then it is not a globbed file we have matched it precisely 2283 tmpvalues.append(arg) 2284 2285 if isglob and canBeFile: 2286 self.verbose_logger.log(logginglevels.DEBUG_2, 2287 _('searching in file entries')) 2288 for thisfile in po.dirlist + po.filelist + po.ghostlist: 2289 if fnmatch.fnmatch(thisfile, arg): 2290 tmpvalues.append(thisfile) 2291 2292 2293 self.verbose_logger.log(logginglevels.DEBUG_2, 2294 _('searching in provides entries')) 2295 for (p_name, p_flag, (p_e, p_v, p_r)) in po.provides: 2296 prov = misc.prco_tuple_to_string((p_name, p_flag, (p_e, p_v, p_r))) 2297 if not usedDepString: 2298 if fnmatch.fnmatch(p_name, arg) or fnmatch.fnmatch(prov, arg): 2299 tmpvalues.append(prov) 2300 2301 if len(tmpvalues) > 0: 2302 if callback: # No matchfor, on globs 2303 if not isglob and callback_has_matchfor: 2304 callback(po, tmpvalues, args) 2305 else: 2306 callback(po, tmpvalues) 2307 matches[po] = tmpvalues 2308 2309 # installed rpms, too 2310 taglist = ['filelist', 'dirnames', 'provides_names'] 2311 for arg in args: 2312 if not misc.re_glob(arg): 2313 isglob = False 2314 if arg[0] != '/': 2315 canBeFile = False 2316 else: 2317 canBeFile = True 2318 else: 2319 isglob = True 2320 canBeFile = True 2321 2322 if not isglob: 2323 where = self.returnInstalledPackagesByDep(arg) 2324 usedDepString = True 2325 for po in where: 2326 tmpvalues = [] 2327 msg = _('Provides-match: %s') % to_unicode(arg) 2328 tmpvalues.append(msg) 2329 2330 if len(tmpvalues) > 0: 2331 if callback: 2332 if callback_has_matchfor: 2333 callback(po, tmpvalues, args) 2334 else: 2335 callback(po, tmpvalues) 2336 matches[po] = tmpvalues 2337 2338 else: 2339 usedDepString = False 2340 where = self.rpmdb 2341 2342 for po in where: 2343 searchlist = [] 2344 tmpvalues = [] 2345 for tag in taglist: 2346 tagdata = getattr(po, tag) 2347 if tagdata is None: 2348 continue 2349 if type(tagdata) is types.ListType: 2350 searchlist.extend(tagdata) 2351 else: 2352 searchlist.append(tagdata) 2353 2354 for item in searchlist: 2355 if fnmatch.fnmatch(item, arg): 2356 tmpvalues.append(item) 2357 2358 if len(tmpvalues) > 0: 2359 if callback: # No matchfor, on globs 2360 callback(po, tmpvalues) 2361 matches[po] = tmpvalues 2362 2363 2364 return matches
2365
2366 - def doGroupLists(self, uservisible=0, patterns=None, ignore_case=True):
2367 """returns two lists of groups, installed groups and available groups 2368 optional 'uservisible' bool to tell it whether or not to return 2369 only groups marked as uservisible""" 2370 2371 2372 installed = [] 2373 available = [] 2374 2375 if self.comps.compscount == 0: 2376 raise Errors.GroupsError, _('No group data available for configured repositories') 2377 2378 if patterns is None: 2379 grps = self.comps.groups 2380 else: 2381 grps = self.comps.return_groups(",".join(patterns), 2382 case_sensitive=not ignore_case) 2383 for grp in grps: 2384 if grp.installed: 2385 if uservisible: 2386 if grp.user_visible: 2387 installed.append(grp) 2388 else: 2389 installed.append(grp) 2390 else: 2391 if uservisible: 2392 if grp.user_visible: 2393 available.append(grp) 2394 else: 2395 available.append(grp) 2396 2397 return sorted(installed), sorted(available)
2398 2399
2400 - def groupRemove(self, grpid):
2401 """mark all the packages in this group to be removed""" 2402 2403 txmbrs_used = [] 2404 2405 thesegroups = self.comps.return_groups(grpid) 2406 if not thesegroups: 2407 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2408 2409 for thisgroup in thesegroups: 2410 thisgroup.toremove = True 2411 pkgs = thisgroup.packages 2412 for pkg in thisgroup.packages: 2413 txmbrs = self.remove(name=pkg, silence_warnings=True) 2414 txmbrs_used.extend(txmbrs) 2415 for txmbr in txmbrs: 2416 txmbr.groups.append(thisgroup.groupid) 2417 2418 return txmbrs_used
2419
2420 - def groupUnremove(self, grpid):
2421 """unmark any packages in the group from being removed""" 2422 2423 2424 thesegroups = self.comps.return_groups(grpid) 2425 if not thesegroups: 2426 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2427 2428 for thisgroup in thesegroups: 2429 thisgroup.toremove = False 2430 pkgs = thisgroup.packages 2431 for pkg in thisgroup.packages: 2432 for txmbr in self.tsInfo: 2433 if txmbr.po.name == pkg and txmbr.po.state in TS_INSTALL_STATES: 2434 try: 2435 txmbr.groups.remove(grpid) 2436 except ValueError: 2437 self.verbose_logger.log(logginglevels.DEBUG_1, 2438 _("package %s was not marked in group %s"), txmbr.po, 2439 grpid) 2440 continue 2441 2442 # if there aren't any other groups mentioned then remove the pkg 2443 if len(txmbr.groups) == 0: 2444 self.tsInfo.remove(txmbr.po.pkgtup)
2445 2446
2447 - def selectGroup(self, grpid, group_package_types=[], enable_group_conditionals=None):
2448 """mark all the packages in the group to be installed 2449 returns a list of transaction members it added to the transaction 2450 set 2451 Optionally take: 2452 group_package_types=List - overrides self.conf.group_package_types 2453 enable_group_conditionals=Bool - overrides self.conf.enable_group_conditionals 2454 """ 2455 2456 if not self.comps.has_group(grpid): 2457 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2458 2459 txmbrs_used = [] 2460 thesegroups = self.comps.return_groups(grpid) 2461 2462 if not thesegroups: 2463 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2464 2465 package_types = self.conf.group_package_types 2466 if group_package_types: 2467 package_types = group_package_types 2468 2469 for thisgroup in thesegroups: 2470 if thisgroup.selected: 2471 continue 2472 2473 thisgroup.selected = True 2474 2475 pkgs = [] 2476 if 'mandatory' in package_types: 2477 pkgs.extend(thisgroup.mandatory_packages) 2478 if 'default' in package_types: 2479 pkgs.extend(thisgroup.default_packages) 2480 if 'optional' in package_types: 2481 pkgs.extend(thisgroup.optional_packages) 2482 2483 for pkg in pkgs: 2484 self.verbose_logger.log(logginglevels.DEBUG_2, 2485 _('Adding package %s from group %s'), pkg, thisgroup.groupid) 2486 try: 2487 txmbrs = self.install(name = pkg) 2488 except Errors.InstallError, e: 2489 self.verbose_logger.debug(_('No package named %s available to be installed'), 2490 pkg) 2491 else: 2492 txmbrs_used.extend(txmbrs) 2493 for txmbr in txmbrs: 2494 txmbr.groups.append(thisgroup.groupid) 2495 2496 group_conditionals = self.conf.enable_group_conditionals 2497 if enable_group_conditionals is not None: # has to be this way so we can set it to False 2498 group_conditionals = enable_group_conditionals 2499 2500 if group_conditionals: 2501 for condreq, cond in thisgroup.conditional_packages.iteritems(): 2502 if self.isPackageInstalled(cond): 2503 try: 2504 txmbrs = self.install(name = condreq) 2505 except Errors.InstallError: 2506 # we don't care if the package doesn't exist 2507 continue 2508 else: 2509 if cond not in self.tsInfo.conditionals: 2510 self.tsInfo.conditionals[cond]=[] 2511 2512 txmbrs_used.extend(txmbrs) 2513 for txmbr in txmbrs: 2514 txmbr.groups.append(thisgroup.groupid) 2515 self.tsInfo.conditionals[cond].append(txmbr.po) 2516 continue 2517 # Otherwise we hook into tsInfo.add to make 2518 # sure we'll catch it if its added later in this transaction 2519 pkgs = self.pkgSack.searchNevra(name=condreq) 2520 if pkgs: 2521 if self.arch.multilib: 2522 if self.conf.multilib_policy == 'best': 2523 use = [] 2524 best = self.arch.legit_multi_arches 2525 best.append('noarch') 2526 for pkg in pkgs: 2527 if pkg.arch in best: 2528 use.append(pkg) 2529 pkgs = use 2530 2531 pkgs = packagesNewestByName(pkgs) 2532 2533 if not self.tsInfo.conditionals.has_key(cond): 2534 self.tsInfo.conditionals[cond] = [] 2535 self.tsInfo.conditionals[cond].extend(pkgs) 2536 return txmbrs_used
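A rough sketch of marking a whole comps group for installation with selectGroup(), assuming a configured yum.YumBase; the group id 'web-server' is only an example, and the resulting transaction still has to be resolved and run through the rest of the YumBase API:

import yum

yb = yum.YumBase()
try:
    txmbrs = yb.selectGroup('web-server',
                            group_package_types=['mandatory', 'default'])
except yum.Errors.GroupsError, e:
    print('no such group: %s' % e)
else:
    print('%d packages marked for installation' % len(txmbrs))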
2537
2538 - def deselectGroup(self, grpid, force=False):
2539 """ Without the force option set, this removes packages from being 2540 installed that were added as part of installing one of the 2541 group(s). If the force option is set, then all installing packages 2542 in the group(s) are force removed from the transaction. """ 2543 2544 if not self.comps.has_group(grpid): 2545 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2546 2547 thesegroups = self.comps.return_groups(grpid) 2548 if not thesegroups: 2549 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2550 2551 for thisgroup in thesegroups: 2552 thisgroup.selected = False 2553 2554 for pkgname in thisgroup.packages: 2555 txmbrs = self.tsInfo.getMembersWithState(None,TS_INSTALL_STATES) 2556 for txmbr in txmbrs: 2557 if txmbr.po.name != pkgname: 2558 continue 2559 2560 if not force: 2561 try: 2562 txmbr.groups.remove(grpid) 2563 except ValueError: 2564 self.verbose_logger.log(logginglevels.DEBUG_1, 2565 _("package %s was not marked in group %s"), txmbr.po, 2566 grpid) 2567 continue 2568 2569 # If the pkg isn't part of any group, or the group is 2570 # being forced out ... then remove the pkg 2571 if force or len(txmbr.groups) == 0: 2572 self.tsInfo.remove(txmbr.po.pkgtup) 2573 for pkg in self.tsInfo.conditionals.get(txmbr.name, []): 2574 self.tsInfo.remove(pkg.pkgtup)
2575
2576 - def getPackageObject(self, pkgtup):
2577 """retrieves a packageObject from a pkgtuple - if we need 2578 to pick and choose which one is best we better call out 2579 to some method from here to pick the best pkgobj if there are 2580 more than one response - right now it's more rudimentary.""" 2581 2582 2583 # look it up in the self.localPackages first: 2584 for po in self.localPackages: 2585 if po.pkgtup == pkgtup: 2586 return po 2587 2588 pkgs = self.pkgSack.searchPkgTuple(pkgtup) 2589 2590 if len(pkgs) == 0: 2591 raise Errors.DepError, _('Package tuple %s could not be found in packagesack') % str(pkgtup) 2592 2593 if len(pkgs) > 1: # boy it'd be nice to do something smarter here FIXME 2594 result = pkgs[0] 2595 else: 2596 result = pkgs[0] # which should be the only 2597 2598 # this is where we could do something to figure out which repository 2599 # is the best one to pull from 2600 2601 return result
2602
2603 - def getInstalledPackageObject(self, pkgtup):
2604          """ Returns a YumInstalledPackage object for the pkgtup specified, or 2605              raises an exception. You should use this instead of 2606              searchPkgTuple() if you are assuming there is a value. """ 2607   2608          pkgs = self.rpmdb.searchPkgTuple(pkgtup) 2609          if len(pkgs) == 0: 2610              raise Errors.RpmDBError, _('Package tuple %s could not be found in rpmdb') % str(pkgtup) 2611           2612          # Ditto. FIXME from getPackageObject() for len() > 1 ... :) 2613          po = pkgs[0] # take the first one 2614          return po
2615
2616 - def gpgKeyCheck(self):
2617 """checks for the presence of gpg keys in the rpmdb 2618 returns 0 if no keys returns 1 if keys""" 2619 2620 gpgkeyschecked = self.conf.cachedir + '/.gpgkeyschecked.yum' 2621 if os.path.exists(gpgkeyschecked): 2622 return 1 2623 2624 myts = rpmUtils.transaction.initReadOnlyTransaction(root=self.conf.installroot) 2625 myts.pushVSFlags(~(rpm._RPMVSF_NOSIGNATURES|rpm._RPMVSF_NODIGESTS)) 2626 idx = myts.dbMatch('name', 'gpg-pubkey') 2627 keys = idx.count() 2628 del idx 2629 del myts 2630 2631 if keys == 0: 2632 return 0 2633 else: 2634 mydir = os.path.dirname(gpgkeyschecked) 2635 if not os.path.exists(mydir): 2636 os.makedirs(mydir) 2637 2638 fo = open(gpgkeyschecked, 'w') 2639 fo.close() 2640 del fo 2641 return 1
2642
2643 - def returnPackagesByDep(self, depstring):
2644 """Pass in a generic [build]require string and this function will 2645 pass back the packages it finds providing that dep.""" 2646 2647 results = self.pkgSack.searchProvides(depstring) 2648 return results
2649 2650
2651 - def returnPackageByDep(self, depstring):
2652 """Pass in a generic [build]require string and this function will 2653 pass back the best(or first) package it finds providing that dep.""" 2654 2655 # we get all sorts of randomness here 2656 errstring = depstring 2657 if type(depstring) not in types.StringTypes: 2658 errstring = str(depstring) 2659 2660 try: 2661 pkglist = self.returnPackagesByDep(depstring) 2662 except Errors.YumBaseError: 2663 raise Errors.YumBaseError, _('No Package found for %s') % errstring 2664 2665 ps = ListPackageSack(pkglist) 2666 result = self._bestPackageFromList(ps.returnNewestByNameArch()) 2667 if result is None: 2668 raise Errors.YumBaseError, _('No Package found for %s') % errstring 2669 2670 return result
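As a sketch of the two provider lookups above (assuming a configured yum.YumBase; the file path and capability name are only examples):

import yum

yb = yum.YumBase()
# every available package providing a file dependency ...
for po in yb.returnPackagesByDep('/usr/bin/python'):
    print('provides /usr/bin/python: %s' % po)
# ... and only the single "best" provider of a plain capability
try:
    best = yb.returnPackageByDep('webserver')
    print('best provider: %s' % best)
except yum.Errors.YumBaseError, e:
    print(str(e))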
2671
2672 - def returnInstalledPackagesByDep(self, depstring):
2673 """Pass in a generic [build]require string and this function will 2674 pass back the installed packages it finds providing that dep.""" 2675 2676 # parse the string out 2677 # either it is 'dep (some operator) e:v-r' 2678 # or /file/dep 2679 # or packagename 2680 if type(depstring) == types.TupleType: 2681 (depname, depflags, depver) = depstring 2682 else: 2683 depname = depstring 2684 depflags = None 2685 depver = None 2686 2687 if depstring[0] != '/': 2688 # not a file dep - look at it for being versioned 2689 dep_split = depstring.split() 2690 if len(dep_split) == 3: 2691 depname, flagsymbol, depver = dep_split 2692 if not flagsymbol in SYMBOLFLAGS: 2693 raise Errors.YumBaseError, _('Invalid version flag') 2694 depflags = SYMBOLFLAGS[flagsymbol] 2695 2696 return self.rpmdb.getProvides(depname, depflags, depver).keys()
2697
2698 - def _bestPackageFromList(self, pkglist):
2699 """take list of package objects and return the best package object. 2700 If the list is empty, return None. 2701 2702 Note: this is not aware of multilib so make sure you're only 2703 passing it packages of a single arch group.""" 2704 2705 2706 if len(pkglist) == 0: 2707 return None 2708 2709 if len(pkglist) == 1: 2710 return pkglist[0] 2711 2712 bestlist = self._compare_providers(pkglist, None) 2713 return bestlist[0][0]
2714
2715 - def bestPackagesFromList(self, pkglist, arch=None, single_name=False):
2716 """Takes a list of packages, returns the best packages. 2717 This function is multilib aware so that it will not compare 2718 multilib to singlelib packages""" 2719 2720 returnlist = [] 2721 compatArchList = self.arch.get_arch_list(arch) 2722 multiLib = [] 2723 singleLib = [] 2724 noarch = [] 2725 for po in pkglist: 2726 if po.arch not in compatArchList: 2727 continue 2728 elif po.arch in ("noarch"): 2729 noarch.append(po) 2730 elif isMultiLibArch(arch=po.arch): 2731 multiLib.append(po) 2732 else: 2733 singleLib.append(po) 2734 2735 # we now have three lists. find the best package(s) of each 2736 multi = self._bestPackageFromList(multiLib) 2737 single = self._bestPackageFromList(singleLib) 2738 no = self._bestPackageFromList(noarch) 2739 2740 if single_name and multi and single and multi.name != single.name: 2741 # Sinlge _must_ match multi, if we want a single package name 2742 single = None 2743 2744 # now, to figure out which arches we actually want 2745 # if there aren't noarch packages, it's easy. multi + single 2746 if no is None: 2747 if multi: returnlist.append(multi) 2748 if single: returnlist.append(single) 2749 # if there's a noarch and it's newer than the multilib, we want 2750 # just the noarch. otherwise, we want multi + single 2751 elif multi: 2752 best = self._bestPackageFromList([multi,no]) 2753 if best.arch == "noarch": 2754 returnlist.append(no) 2755 else: 2756 if multi: returnlist.append(multi) 2757 if single: returnlist.append(single) 2758 # similar for the non-multilib case 2759 elif single: 2760 best = self._bestPackageFromList([single,no]) 2761 if best.arch == "noarch": 2762 returnlist.append(no) 2763 else: 2764 returnlist.append(single) 2765 # if there's not a multi or single lib, then we want the noarch 2766 else: 2767 returnlist.append(no) 2768 2769 return returnlist
2770 2771 # FIXME: This doesn't really work, as it assumes one obsoleter for each pkg 2772 # when we can have: 2773 # 1 pkg obsoleted by multiple pkgs _and_ 2774 # 1 pkg obsoleting multiple pkgs 2775 # ...and we need to detect loops, and get the arches "right" and do this 2776 # for chains. Atm. I hate obsoletes, and I can't get it to work better, 2777 # easily ... so screw it, don't create huge chains of obsoletes with some 2778 # loops in there too ... or I'll have to hurt you.
2779 - def _pkg2obspkg(self, po):
2780 """ Given a package return the package it's obsoleted by and so 2781 we should install instead. Or None if there isn't one. """ 2782 thispkgobsdict = self.up.checkForObsolete([po.pkgtup]) 2783 if po.pkgtup in thispkgobsdict: 2784 obsoleting = thispkgobsdict[po.pkgtup][0] 2785 obsoleting_pkg = self.getPackageObject(obsoleting) 2786 return obsoleting_pkg 2787 return None
2788
2789 - def _test_loop(self, node, next_func):
2790 """ Generic comp. sci. test for looping, walk the list with two pointers 2791 moving one twice as fast as the other. If they are ever == you have 2792 a loop. If loop we return None, if no loop the last element. """ 2793 slow = node 2794 done = False 2795 while True: 2796 next = next_func(node) 2797 if next is None and not done: return None 2798 if next is None: return node 2799 node = next_func(next) 2800 if node is None: return next 2801 done = True 2802 2803 slow = next_func(slow) 2804 if next == slow: 2805 return None
2806
2807 - def _at_groupinstall(self, pattern):
2808 " Do groupinstall via. leading @ on the cmd line, for install/update." 2809 assert pattern[0] == '@' 2810 group_string = pattern[1:] 2811 tx_return = [] 2812 for group in self.comps.return_groups(group_string): 2813 try: 2814 txmbrs = self.selectGroup(group.groupid) 2815 tx_return.extend(txmbrs) 2816 except yum.Errors.GroupsError: 2817 self.logger.critical(_('Warning: Group %s does not exist.'), group_string) 2818 continue 2819 return tx_return
2820
2821 - def _at_groupremove(self, pattern):
2822 " Do groupremove via. leading @ on the cmd line, for remove." 2823 assert pattern[0] == '@' 2824 group_string = pattern[1:] 2825 tx_return = [] 2826 try: 2827 txmbrs = self.groupRemove(group_string) 2828 except yum.Errors.GroupsError: 2829 self.logger.critical(_('No group named %s exists'), group_string) 2830 else: 2831 tx_return.extend(txmbrs) 2832 return tx_return
2833 2834 # Note that this returns available pkgs, and not txmbrs like the other 2835 # _at_group* functions.
2836 - def _at_groupdowngrade(self, pattern):
2837 " Do downgrade of a group via. leading @ on the cmd line." 2838 assert pattern[0] == '@' 2839 grpid = pattern[1:] 2840 2841 thesegroups = self.comps.return_groups(grpid) 2842 if not thesegroups: 2843 raise Errors.GroupsError, _("No Group named %s exists") % grpid 2844 pkgnames = set() 2845 for thisgroup in thesegroups: 2846 pkgnames.update(thisgroup.packages) 2847 return self.pkgSack.searchNames(pkgnames)
2848
2849 - def _find_obsoletees(self, po):
2850 """ Return the pkgs. that are obsoleted by the po we pass in. """ 2851 if not isinstance(po, YumLocalPackage): 2852 for (obstup, inst_tup) in self.up.getObsoletersTuples(name=po.name): 2853 if po.pkgtup == obstup: 2854 installed_pkg = self.getInstalledPackageObject(inst_tup) 2855 yield installed_pkg 2856 else: 2857 for (obs_n, obs_f, (obs_e, obs_v, obs_r)) in po.obsoletes: 2858 for pkg in self.rpmdb.searchNevra(name=obs_n): 2859 installedtup = (pkg.name, 'EQ', (pkg.epoch, 2860 pkg.ver, pkg.release)) 2861 if po.inPrcoRange('obsoletes', installedtup): 2862 yield pkg
2863
2864 - def _add_prob_flags(self, *flags):
2865 """ Add all of the passed flags to the tsInfo.probFilterFlags array. """ 2866 for flag in flags: 2867 if flag not in self.tsInfo.probFilterFlags: 2868 self.tsInfo.probFilterFlags.append(flag)
2869
2870 - def install(self, po=None, **kwargs):
2871 """try to mark for install the item specified. Uses provided package 2872 object, if available. If not it uses the kwargs and gets the best 2873 packages from the keyword options provided 2874 returns the list of txmbr of the items it installs 2875 2876 """ 2877 2878 pkgs = [] 2879 was_pattern = False 2880 if po: 2881 if isinstance(po, YumAvailablePackage) or isinstance(po, YumLocalPackage): 2882 pkgs.append(po) 2883 else: 2884 raise Errors.InstallError, _('Package Object was not a package object instance') 2885 2886 else: 2887 if not kwargs: 2888 raise Errors.InstallError, _('Nothing specified to install') 2889 2890 if 'pattern' in kwargs: 2891 if kwargs['pattern'][0] == '@': 2892 return self._at_groupinstall(kwargs['pattern']) 2893 2894 was_pattern = True 2895 pats = [kwargs['pattern']] 2896 mypkgs = self.pkgSack.returnPackages(patterns=pats, 2897 ignore_case=False) 2898 pkgs.extend(mypkgs) 2899 # if we have anything left unmatched, let's take a look for it 2900 # being a dep like glibc.so.2 or /foo/bar/baz 2901 2902 if not mypkgs: 2903 arg = kwargs['pattern'] 2904 self.verbose_logger.debug(_('Checking for virtual provide or file-provide for %s'), 2905 arg) 2906 2907 try: 2908 mypkgs = self.returnPackagesByDep(arg) 2909 except yum.Errors.YumBaseError, e: 2910 self.logger.critical(_('No Match for argument: %s') % arg) 2911 else: 2912 # install MTA* == fail, because provides don't do globs 2913 # install /usr/kerberos/bin/* == success (and we want 2914 # all of the pkgs) 2915 if mypkgs and not misc.re_glob(arg): 2916 mypkgs = self.bestPackagesFromList(mypkgs, 2917 single_name=True) 2918 if mypkgs: 2919 pkgs.extend(mypkgs) 2920 2921 else: 2922 nevra_dict = self._nevra_kwarg_parse(kwargs) 2923 2924 pkgs = self.pkgSack.searchNevra(name=nevra_dict['name'], 2925 epoch=nevra_dict['epoch'], arch=nevra_dict['arch'], 2926 ver=nevra_dict['version'], rel=nevra_dict['release']) 2927 2928 if pkgs: 2929 # if was_pattern or nevra-dict['arch'] is none, take the list 2930 # of arches based on our multilib_compat config and 2931 # toss out any pkgs of any arch NOT in that arch list 2932 2933 2934 # only do these things if we're multilib 2935 if self.arch.multilib: 2936 if was_pattern or not nevra_dict['arch']: # and only if they 2937 # they didn't specify an arch 2938 if self.conf.multilib_policy == 'best': 2939 pkgs_by_name = {} 2940 use = [] 2941 not_added = [] 2942 best = self.arch.legit_multi_arches 2943 best.append('noarch') 2944 for pkg in pkgs: 2945 if pkg.arch in best: 2946 pkgs_by_name[pkg.name] = 1 2947 use.append(pkg) 2948 else: 2949 not_added.append(pkg) 2950 for pkg in not_added: 2951 if not pkg.name in pkgs_by_name: 2952 use.append(pkg) 2953 2954 pkgs = use 2955 2956 pkgs = packagesNewestByName(pkgs) 2957 2958 pkgbyname = {} 2959 for pkg in pkgs: 2960 if not pkgbyname.has_key(pkg.name): 2961 pkgbyname[pkg.name] = [ pkg ] 2962 else: 2963 pkgbyname[pkg.name].append(pkg) 2964 2965 lst = [] 2966 for pkgs in pkgbyname.values(): 2967 lst.extend(self.bestPackagesFromList(pkgs)) 2968 pkgs = lst 2969 2970 2971 if not pkgs: 2972 # Do we still want to return errors here? 2973 # We don't in the cases below, so I didn't here... 
2974 if 'pattern' in kwargs: 2975 pkgs = self.rpmdb.returnPackages(patterns=[kwargs['pattern']], 2976 ignore_case=False) 2977 if 'name' in kwargs: 2978 pkgs = self.rpmdb.searchNevra(name=kwargs['name']) 2979 if 'pkgtup' in kwargs: 2980 pkgs = self.rpmdb.searchNevra(name=kwargs['pkgtup'][0]) 2981 # Warning here does "weird" things when doing: 2982 # yum --disablerepo='*' install '*' 2983 # etc. ... see RHBZ#480402 2984 if False: 2985 for pkg in pkgs: 2986 self.verbose_logger.warning(_('Package %s installed and not available'), pkg) 2987 if pkgs: 2988 return [] 2989 raise Errors.InstallError, _('No package(s) available to install') 2990 2991 # FIXME - lots more checking here 2992 # - install instead of erase 2993 # - better error handling/reporting 2994 2995 2996 tx_return = [] 2997 for po in pkgs: 2998 if self.tsInfo.exists(pkgtup=po.pkgtup): 2999 if self.tsInfo.getMembersWithState(po.pkgtup, TS_INSTALL_STATES): 3000 self.verbose_logger.log(logginglevels.DEBUG_1, 3001 _('Package: %s - already in transaction set'), po) 3002 tx_return.extend(self.tsInfo.getMembers(pkgtup=po.pkgtup)) 3003 continue 3004 3005 # make sure this shouldn't be passed to update: 3006 if po.pkgtup in self.up.updating_dict: 3007 txmbrs = self.update(po=po) 3008 tx_return.extend(txmbrs) 3009 continue 3010 3011 # Make sure we're not installing a package which is obsoleted by 3012 # something else in the repo. Unless there is a obsoletion loop, 3013 # at which point ignore everything. 3014 obsoleting_pkg = self._test_loop(po, self._pkg2obspkg) 3015 if obsoleting_pkg is not None: 3016 # this is not a definitive check but it'll make sure we don't 3017 # pull in foo.i586 when foo.x86_64 already obsoletes the pkg and 3018 # is already installed 3019 already_obs = None 3020 poprovtup = (po.name, 'EQ', (po.epoch, po.ver, po.release)) 3021 for pkg in self.rpmdb.searchNevra(name=obsoleting_pkg.name): 3022 if pkg.inPrcoRange('obsoletes', poprovtup): 3023 already_obs = pkg 3024 continue 3025 3026 if already_obs: 3027 self.verbose_logger.warning(_('Package %s is obsoleted by %s which is already installed'), 3028 po, already_obs) 3029 else: 3030 self.verbose_logger.warning(_('Package %s is obsoleted by %s, trying to install %s instead'), 3031 po.name, obsoleting_pkg.name, obsoleting_pkg) 3032 tx_return.extend(self.install(po=obsoleting_pkg)) 3033 continue 3034 3035 # make sure it's not already installed 3036 if self.rpmdb.contains(po=po): 3037 if not self.tsInfo.getMembersWithState(po.pkgtup, TS_REMOVE_STATES): 3038 self.verbose_logger.warning(_('Package %s already installed and latest version'), po) 3039 continue 3040 3041 # make sure we don't have a name.arch of this already installed 3042 # if so pass it to update b/c it should be able to figure it out 3043 # if self.rpmdb.contains(name=po.name, arch=po.arch) and not self.allowedMultipleInstalls(po): 3044 if not self.allowedMultipleInstalls(po): 3045 found = True 3046 for ipkg in self.rpmdb.searchNevra(name=po.name, arch=po.arch): 3047 found = False 3048 if self.tsInfo.getMembersWithState(ipkg.pkgtup, TS_REMOVE_STATES): 3049 found = True 3050 break 3051 if not found: 3052 self.verbose_logger.warning(_('Package matching %s already installed. Checking for update.'), po) 3053 txmbrs = self.update(po=po) 3054 tx_return.extend(txmbrs) 3055 continue 3056 3057 3058 # at this point we are going to mark the pkg to be installed, make sure 3059 # it's not an older package that is allowed in due to multiple installs 3060 # or some other oddity. 
If it is - then modify the problem filter to cope 3061 3062 for ipkg in self.rpmdb.searchNevra(name=po.name, arch=po.arch): 3063 if ipkg.verEQ(po): 3064 self._add_prob_flags(rpm.RPMPROB_FILTER_REPLACEPKG, 3065 rpm.RPMPROB_FILTER_REPLACENEWFILES, 3066 rpm.RPMPROB_FILTER_REPLACEOLDFILES) 3067 # Yum needs the remove to happen before we allow the 3068 # install of the same version. But rpm doesn't like that 3069 # as it then has an install which removes the old version 3070 # and a remove, which also tries to remove the old version. 3071 self.tsInfo.remove(ipkg.pkgtup) 3072 break 3073 for ipkg in self.rpmdb.searchNevra(name=po.name): 3074 if ipkg.verGT(po) and not canCoinstall(ipkg.arch, po.arch): 3075 self._add_prob_flags(rpm.RPMPROB_FILTER_OLDPACKAGE) 3076 break 3077 3078 # it doesn't obsolete anything. If it does, mark that in the tsInfo, too 3079 if po.pkgtup in self.up.getObsoletesList(name=po.name): 3080 for obsoletee in self._find_obsoletees(po): 3081 txmbr = self.tsInfo.addObsoleting(po, obsoletee) 3082 self.tsInfo.addObsoleted(obsoletee, po) 3083 tx_return.append(txmbr) 3084 else: 3085 txmbr = self.tsInfo.addInstall(po) 3086 tx_return.append(txmbr) 3087 3088 return tx_return
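A minimal sketch of marking packages for installation through install(), assuming a configured yum.YumBase with suitable privileges (the package names are only examples). Marking only adds members to the transaction set; a caller would normally go on to resolve and run the transaction elsewhere in the YumBase API:

import yum

yb = yum.YumBase()
try:
    # a pattern may be a glob or a leading-@ group, as handled above;
    # explicit keywords such as yb.install(name='zsh') work too
    txmbrs = yb.install(pattern='vim-enhanced')
except yum.Errors.InstallError, e:
    print('nothing to do: %s' % e)
else:
    for txmbr in txmbrs:
        print('will install %s from repo %s' % (txmbr.po, txmbr.po.repoid))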
3089
3090 - def _check_new_update_provides(self, opkg, npkg):
3091 """ Check for any difference in the provides of the old and new update 3092 that is needed by the transaction. If so we "update" those pkgs 3093 too, to the latest version. """ 3094 oprovs = set(opkg.returnPrco('provides')) 3095 nprovs = set(npkg.returnPrco('provides')) 3096 tx_return = [] 3097 for prov in oprovs.difference(nprovs): 3098 reqs = self.tsInfo.getRequires(*prov) 3099 for pkg in reqs: 3100 for req in reqs[pkg]: 3101 if not npkg.inPrcoRange('provides', req): 3102 naTup = (pkg.name, pkg.arch) 3103 for pkg in self.pkgSack.returnNewestByNameArch(naTup): 3104 tx_return.extend(self.update(po=pkg)) 3105 break 3106 return tx_return
3107
3108 - def _newer_update_in_trans(self, pkgtup, available_pkg, tx_return):
3109 """ We return True if there is a newer package already in the 3110 transaction. If there is an older one, we remove it (and update any 3111 deps. that aren't satisfied by the newer pkg) and return False so 3112 we'll update to this newer pkg. """ 3113 found = False 3114 for txmbr in self.tsInfo.getMembersWithState(pkgtup, [TS_UPDATED]): 3115 count = 0 3116 for po in txmbr.updated_by: 3117 if available_pkg.verLE(po): 3118 count += 1 3119 else: 3120 for ntxmbr in self.tsInfo.getMembers(po.pkgtup): 3121 self.tsInfo.remove(ntxmbr.po.pkgtup) 3122 txs = self._check_new_update_provides(ntxmbr.po, 3123 available_pkg) 3124 tx_return.extend(txs) 3125 if count: 3126 found = True 3127 else: 3128 self.tsInfo.remove(txmbr.po.pkgtup) 3129 return found
3130
3131 - def _add_up_txmbr(self, requiringPo, upkg, ipkg):
3132 txmbr = self.tsInfo.addUpdate(upkg, ipkg) 3133 if requiringPo: 3134 txmbr.setAsDep(requiringPo) 3135 if ('reason' in ipkg.yumdb_info and ipkg.yumdb_info.reason == 'dep'): 3136 txmbr.reason = 'dep' 3137 return txmbr
3138
3139 - def update(self, po=None, requiringPo=None, **kwargs):
3140 """try to mark for update the item(s) specified. 3141 po is a package object - if that is there, mark it for update, 3142 if possible 3143 else use **kwargs to match the package needing update 3144 if nothing is specified at all then attempt to update everything 3145 3146 returns the list of txmbr of the items it marked for update""" 3147 3148 # check for args - if no po nor kwargs, do them all 3149 # if po, do it, ignore all else 3150 # if no po do kwargs 3151 # uninstalled pkgs called for update get returned with errors in a list, maybe? 3152 3153 tx_return = [] 3154 if not po and not kwargs: # update everything (the easy case) 3155 self.verbose_logger.log(logginglevels.DEBUG_2, _('Updating Everything')) 3156 updates = self.up.getUpdatesTuples() 3157 if self.conf.obsoletes: 3158 obsoletes = self.up.getObsoletesTuples(newest=1) 3159 else: 3160 obsoletes = [] 3161 3162 for (obsoleting, installed) in obsoletes: 3163 obsoleting_pkg = self.getPackageObject(obsoleting) 3164 topkg = self._test_loop(obsoleting_pkg, self._pkg2obspkg) 3165 if topkg is not None: 3166 obsoleting_pkg = topkg 3167 installed_pkg = self.getInstalledPackageObject(installed) 3168 txmbr = self.tsInfo.addObsoleting(obsoleting_pkg, installed_pkg) 3169 self.tsInfo.addObsoleted(installed_pkg, obsoleting_pkg) 3170 if requiringPo: 3171 txmbr.setAsDep(requiringPo) 3172 tx_return.append(txmbr) 3173 3174 for (new, old) in updates: 3175 if self.tsInfo.isObsoleted(pkgtup=old): 3176 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s'), 3177 old) 3178 else: 3179 tx_return.extend(self.update(po=self.getPackageObject(new))) 3180 3181 return tx_return 3182 3183 # complications 3184 # the user has given us something - either a package object to be 3185 # added to the transaction as an update or they've given us a pattern 3186 # of some kind 3187 3188 instpkgs = [] 3189 availpkgs = [] 3190 if po: # just a po 3191 if po.repoid == 'installed': 3192 instpkgs.append(po) 3193 else: 3194 availpkgs.append(po) 3195 3196 3197 elif 'pattern' in kwargs: 3198 if kwargs['pattern'][0] == '@': 3199 return self._at_groupinstall(kwargs['pattern']) 3200 3201 (e, m, u) = self.rpmdb.matchPackageNames([kwargs['pattern']]) 3202 instpkgs.extend(e) 3203 instpkgs.extend(m) 3204 3205 if u: 3206 depmatches = [] 3207 arg = u[0] 3208 try: 3209 depmatches = self.returnInstalledPackagesByDep(arg) 3210 except yum.Errors.YumBaseError, e: 3211 self.logger.critical(_('%s') % e) 3212 3213 instpkgs.extend(depmatches) 3214 3215 # Always look for available packages, it doesn't seem to do any 3216 # harm (apart from some time). And it fixes weird edge cases where 3217 # "update a" (which requires a new b) is different from "update b" 3218 try: 3219 pats = [kwargs['pattern']] 3220 m = self.pkgSack.returnNewestByNameArch(patterns=pats) 3221 except Errors.PackageSackError: 3222 m = [] 3223 availpkgs.extend(m) 3224 3225 if not availpkgs and not instpkgs: 3226 self.logger.critical(_('No Match for argument: %s') % arg) 3227 3228 else: # we have kwargs, sort them out. 
3229 nevra_dict = self._nevra_kwarg_parse(kwargs) 3230 3231 instpkgs = self.rpmdb.searchNevra(name=nevra_dict['name'], 3232 epoch=nevra_dict['epoch'], arch=nevra_dict['arch'], 3233 ver=nevra_dict['version'], rel=nevra_dict['release']) 3234 3235 if not instpkgs: 3236 availpkgs = self.pkgSack.searchNevra(name=nevra_dict['name'], 3237 epoch=nevra_dict['epoch'], arch=nevra_dict['arch'], 3238 ver=nevra_dict['version'], rel=nevra_dict['release']) 3239 if len(availpkgs) > 1: 3240 availpkgs = self._compare_providers(availpkgs, requiringPo) 3241 availpkgs = map(lambda x: x[0], availpkgs) 3242 3243 3244 # for any thing specified 3245 # get the list of available pkgs matching it (or take the po) 3246 # get the list of installed pkgs matching it (or take the po) 3247 # go through each list and look for: 3248 # things obsoleting it if it is an installed pkg 3249 # things it updates if it is an available pkg 3250 # things updating it if it is an installed pkg 3251 # in that order 3252 # all along checking to make sure we: 3253 # don't update something that's already been obsoleted 3254 # don't update something that's already been updated 3255 3256 # if there are more than one package that matches an update from 3257 # a pattern/kwarg then: 3258 # if it is a valid update and we' 3259 3260 # TODO: we should search the updates and obsoletes list and 3261 # mark the package being updated or obsoleted away appropriately 3262 # and the package relationship in the tsInfo 3263 3264 3265 # check for obsoletes first 3266 if self.conf.obsoletes: 3267 for installed_pkg in instpkgs: 3268 obs_tups = self.up.obsoleted_dict.get(installed_pkg.pkgtup, []) 3269 # This is done so we don't have to returnObsoletes(newest=True) 3270 # It's a minor UI problem for RHEL, but might as well dtrt. 3271 obs_pkgs = [self.getPackageObject(tup) for tup in obs_tups] 3272 for obsoleting_pkg in packagesNewestByName(obs_pkgs): 3273 tx_return.extend(self.install(po=obsoleting_pkg)) 3274 for available_pkg in availpkgs: 3275 for obsoleted in self.up.obsoleting_dict.get(available_pkg.pkgtup, []): 3276 obsoleted_pkg = self.getInstalledPackageObject(obsoleted) 3277 txmbr = self.tsInfo.addObsoleting(available_pkg, obsoleted_pkg) 3278 if requiringPo: 3279 txmbr.setAsDep(requiringPo) 3280 tx_return.append(txmbr) 3281 if self.tsInfo.isObsoleted(obsoleted): 3282 self.verbose_logger.log(logginglevels.DEBUG_2, _('Package is already obsoleted: %s.%s %s:%s-%s'), obsoleted) 3283 else: 3284 txmbr = self.tsInfo.addObsoleted(obsoleted_pkg, available_pkg) 3285 tx_return.append(txmbr) 3286 3287 for installed_pkg in instpkgs: 3288 for updating in self.up.updatesdict.get(installed_pkg.pkgtup, []): 3289 po = self.getPackageObject(updating) 3290 if self.tsInfo.isObsoleted(installed_pkg.pkgtup): 3291 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s'), 3292 installed_pkg.pkgtup) 3293 # at this point we are going to mark the pkg to be installed, make sure 3294 # it doesn't obsolete anything. 
If it does, mark that in the tsInfo, too 3295 elif po.pkgtup in self.up.getObsoletesList(name=po.name): 3296 for obsoletee in self._find_obsoletees(po): 3297 txmbr = self.tsInfo.addUpdate(po, installed_pkg) 3298 if requiringPo: 3299 txmbr.setAsDep(requiringPo) 3300 self.tsInfo.addObsoleting(po, obsoletee) 3301 self.tsInfo.addObsoleted(obsoletee, po) 3302 tx_return.append(txmbr) 3303 else: 3304 txmbr = self._add_up_txmbr(requiringPo, po, installed_pkg) 3305 tx_return.append(txmbr) 3306 3307 for available_pkg in availpkgs: 3308 # Make sure we're not installing a package which is obsoleted by 3309 # something else in the repo. Unless there is a obsoletion loop, 3310 # at which point ignore everything. 3311 obsoleting_pkg = self._test_loop(available_pkg, self._pkg2obspkg) 3312 if obsoleting_pkg is not None: 3313 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is obsoleted: %s'), available_pkg) 3314 tx_return.extend(self.update(po=obsoleting_pkg)) 3315 continue 3316 for updated in self.up.updating_dict.get(available_pkg.pkgtup, []): 3317 if self.tsInfo.isObsoleted(updated): 3318 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s'), 3319 updated) 3320 elif self._newer_update_in_trans(updated, available_pkg, 3321 tx_return): 3322 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already updated: %s.%s %s:%s-%s'), 3323 updated) 3324 3325 else: 3326 updated_pkg = self.getInstalledPackageObject(updated) 3327 txmbr = self._add_up_txmbr(requiringPo, 3328 available_pkg, updated_pkg) 3329 tx_return.append(txmbr) 3330 3331 # check to see if the pkg we want to install is not _quite_ the newest 3332 # one but still technically an update over what is installed. 3333 #FIXME - potentially do the comparables thing from what used to 3334 # be in cli.installPkgs() to see what we should be comparing 3335 # it to of what is installed. in the meantime name.arch is 3336 # most likely correct 3337 pot_updated = self.rpmdb.searchNevra(name=available_pkg.name, arch=available_pkg.arch) 3338 if pot_updated and self.allowedMultipleInstalls(available_pkg): 3339 # only compare against the newest of what's installed for kernel 3340 pot_updated = sorted(pot_updated)[-1:] 3341 3342 for ipkg in pot_updated: 3343 if self.tsInfo.isObsoleted(ipkg.pkgtup): 3344 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already obsoleted: %s.%s %s:%s-%s'), 3345 ipkg.pkgtup) 3346 elif self._newer_update_in_trans(ipkg.pkgtup, available_pkg, 3347 tx_return): 3348 self.verbose_logger.log(logginglevels.DEBUG_2, _('Not Updating Package that is already updated: %s.%s %s:%s-%s'), 3349 ipkg.pkgtup) 3350 elif ipkg.verLT(available_pkg): 3351 txmbr = self._add_up_txmbr(requiringPo, available_pkg, ipkg) 3352 tx_return.append(txmbr) 3353 3354 for txmbr in tx_return: 3355 for i_pkg in self.rpmdb.searchNevra(name=txmbr.name): 3356 if i_pkg not in txmbr.updates: 3357 if self._does_this_update(txmbr.po, i_pkg): 3358 self.tsInfo.addUpdated(i_pkg, txmbr.po) 3359 3360 return tx_return
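A short usage sketch for update(), assuming a configured yum.YumBase (the narrowing pattern shown in the comment is only an example):

import yum

yb = yum.YumBase()
# with no arguments everything that has an update available is marked;
# yb.update(pattern='kernel*') or yb.update(name='bash') narrow it down
txmbrs = yb.update()
if not txmbrs:
    print('nothing to update')
for txmbr in txmbrs:
    print('will update to %s' % txmbr.po)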
3361
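# Usage sketch for the update logic above (illustrative, not authoritative):
# assumes a configured YumBase instance; 'bash' is a placeholder name.
#
#     import yum
#     yb = yum.YumBase()
#     txmbrs = yb.update(name='bash')   # or yb.update() for everything,
#                                       # or po=/pattern=/pkgtup= kwargs
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()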
3362 - def remove(self, po=None, **kwargs):
3363 """try to find and mark for remove the specified package(s) - 3364 if po is specified then that package object (if it is installed) 3365 will be marked for removal. 3366 if no po then look at kwargs, if neither then raise an exception""" 3367 3368 if not po and not kwargs: 3369 raise Errors.RemoveError, 'Nothing specified to remove' 3370 3371 tx_return = [] 3372 pkgs = [] 3373 3374 3375 if po: 3376 pkgs = [po] 3377 else: 3378 if 'pattern' in kwargs: 3379 if kwargs['pattern'][0] == '@': 3380 return self._at_groupremove(kwargs['pattern']) 3381 3382 (e,m,u) = self.rpmdb.matchPackageNames([kwargs['pattern']]) 3383 pkgs.extend(e) 3384 pkgs.extend(m) 3385 if u: 3386 depmatches = [] 3387 arg = u[0] 3388 try: 3389 depmatches = self.returnInstalledPackagesByDep(arg) 3390 except yum.Errors.YumBaseError, e: 3391 self.logger.critical(_('%s') % e) 3392 3393 if not depmatches: 3394 self.logger.critical(_('No Match for argument: %s') % arg) 3395 else: 3396 pkgs.extend(depmatches) 3397 3398 else: 3399 nevra_dict = self._nevra_kwarg_parse(kwargs) 3400 3401 pkgs = self.rpmdb.searchNevra(name=nevra_dict['name'], 3402 epoch=nevra_dict['epoch'], arch=nevra_dict['arch'], 3403 ver=nevra_dict['version'], rel=nevra_dict['release']) 3404 3405 if len(pkgs) == 0: 3406 if not kwargs.get('silence_warnings', False): 3407 self.logger.warning(_("No package matched to remove")) 3408 3409 for po in pkgs: 3410 txmbr = self.tsInfo.addErase(po) 3411 tx_return.append(txmbr) 3412 3413 return tx_return
3414
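# Usage sketch for remove() (illustrative): 'zsh' is a placeholder name.
#
#     yb = yum.YumBase()
#     txmbrs = yb.remove(name='zsh')    # or pattern='zsh*', pkgtup=...,
#                                       # or po=<installed package object>
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()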
3415 - def installLocal(self, pkg, po=None, updateonly=False):
3416 """ 3417 handles installs/updates of rpms provided on the filesystem in a 3418 local dir (ie: not from a repo) 3419 3420 Return the added transaction members. 3421 3422 @param pkg: a path to an rpm file on disk. 3423 @param po: A YumLocalPackage 3424 @param updateonly: Whether or not true installs are valid. 3425 """ 3426 3427 # read in the package into a YumLocalPackage Object 3428 # append it to self.localPackages 3429 # check if it can be installed or updated based on nevra versus rpmdb 3430 # don't import the repos until we absolutely need them for depsolving 3431 tx_return = [] 3432 installpkgs = [] 3433 updatepkgs = [] 3434 donothingpkgs = [] 3435 3436 if not po: 3437 try: 3438 po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg, 3439 ua=default_grabber.opts.user_agent) 3440 except Errors.MiscError: 3441 self.logger.critical(_('Cannot open: %s. Skipping.'), pkg) 3442 return tx_return 3443 self.verbose_logger.log(logginglevels.INFO_2, 3444 _('Examining %s: %s'), po.localpath, po) 3445 3446 # if by any chance we're a noncompat arch rpm - bail and throw out an error 3447 # FIXME -our archlist should be stored somewhere so we don't have to 3448 # do this: but it's not a config file sort of thing 3449 # FIXME: Should add noarch, yum localinstall works ... 3450 # just rm this method? 3451 if po.arch not in self.arch.archlist: 3452 self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch) 3453 return tx_return 3454 3455 # everything installed that matches the name 3456 installedByKey = self.rpmdb.searchNevra(name=po.name) 3457 # go through each package 3458 if len(installedByKey) == 0: # nothing installed by that name 3459 if updateonly: 3460 self.logger.warning(_('Package %s not installed, cannot update it. 
Run yum install to install it instead.'), po.name) 3461 return tx_return 3462 else: 3463 installpkgs.append(po) 3464 3465 for installed_pkg in installedByKey: 3466 if po.verGT(installed_pkg): # we're newer - this is an update, pass to them 3467 if installed_pkg.name in self.conf.exactarchlist: 3468 if po.arch == installed_pkg.arch: 3469 updatepkgs.append((po, installed_pkg)) 3470 else: 3471 donothingpkgs.append(po) 3472 else: 3473 updatepkgs.append((po, installed_pkg)) 3474 elif po.verEQ(installed_pkg): 3475 if (po.arch != installed_pkg.arch and 3476 (isMultiLibArch(po.arch) or 3477 isMultiLibArch(installed_pkg.arch))): 3478 installpkgs.append(po) 3479 else: 3480 donothingpkgs.append(po) 3481 elif self.allowedMultipleInstalls(po): 3482 installpkgs.append(po) 3483 else: 3484 donothingpkgs.append(po) 3485 3486 # handle excludes for a localinstall 3487 check_pkgs = installpkgs + [x[0] for x in updatepkgs] 3488 if self._is_local_exclude(po, check_pkgs): 3489 self.verbose_logger.debug(_('Excluding %s'), po) 3490 return tx_return 3491 3492 for po in installpkgs: 3493 self.verbose_logger.log(logginglevels.INFO_2, 3494 _('Marking %s to be installed'), po.localpath) 3495 self.localPackages.append(po) 3496 tx_return.extend(self.install(po=po)) 3497 3498 for (po, oldpo) in updatepkgs: 3499 self.verbose_logger.log(logginglevels.INFO_2, 3500 _('Marking %s as an update to %s'), po.localpath, oldpo) 3501 self.localPackages.append(po) 3502 txmbrs = self.update(po=po) 3503 tx_return.extend(txmbrs) 3504 3505 for po in donothingpkgs: 3506 self.verbose_logger.log(logginglevels.INFO_2, 3507 _('%s: does not update installed package.'), po.localpath) 3508 3509 # this checks to make sure that any of the to-be-installed pkgs 3510 # does not obsolete something else that's installed 3511 # this doesn't handle the localpkgs obsoleting EACH OTHER or 3512 # anything else in the transaction set, though. That could/should 3513 # be fixed later but a fair bit of that is a pebkac and should be 3514 # said as "don't do that". potential 'fixme' 3515 for txmbr in tx_return: 3516 if txmbr.po.obsoletes: 3517 for obs_pkg in self._find_obsoletees(txmbr.po): 3518 self.tsInfo.addObsoleted(obs_pkg, txmbr.po) 3519 txmbr.obsoletes.append(obs_pkg) 3520 self.tsInfo.addObsoleting(txmbr.po,obs_pkg) 3521 3522 return tx_return
3523
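# Usage sketch for installLocal() (illustrative): the rpm path is a placeholder.
#
#     yb = yum.YumBase()
#     txmbrs = yb.installLocal('/tmp/foo-1.0-1.noarch.rpm')
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()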
3524 - def reinstallLocal(self, pkg, po=None):
3525 """ 3526 handles reinstall of rpms provided on the filesystem in a 3527 local dir (ie: not from a repo) 3528 3529 Return the added transaction members. 3530 3531 @param pkg: a path to an rpm file on disk. 3532 @param po: A YumLocalPackage 3533 """ 3534 3535 if not po: 3536 try: 3537 po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg, 3538 ua=default_grabber.opts.user_agent) 3539 except Errors.MiscError: 3540 self.logger.critical(_('Cannot open file: %s. Skipping.'), pkg) 3541 return [] 3542 self.verbose_logger.log(logginglevels.INFO_2, 3543 _('Examining %s: %s'), po.localpath, po) 3544 3545 if po.arch not in self.arch.archlist: 3546 self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch) 3547 return [] 3548 3549 # handle excludes for a local reinstall 3550 if self._is_local_exclude(po, [po]): 3551 self.verbose_logger.debug(_('Excluding %s'), po) 3552 return [] 3553 3554 return self.reinstall(po=po)
3555
3556 - def reinstall(self, po=None, **kwargs):
3557 """Setup the problem filters to allow a reinstall to work, then 3558 pass everything off to install""" 3559 3560 self._add_prob_flags(rpm.RPMPROB_FILTER_REPLACEPKG, 3561 rpm.RPMPROB_FILTER_REPLACENEWFILES, 3562 rpm.RPMPROB_FILTER_REPLACEOLDFILES) 3563 3564 tx_mbrs = [] 3565 if po: # The po, is the "available" po ... we want the installed po 3566 tx_mbrs.extend(self.remove(pkgtup=po.pkgtup)) 3567 else: 3568 tx_mbrs.extend(self.remove(**kwargs)) 3569 if not tx_mbrs: 3570 raise Errors.ReinstallRemoveError, _("Problem in reinstall: no package matched to remove") 3571 templen = len(tx_mbrs) 3572 # this is a reinstall, so if we can't reinstall exactly what we uninstalled 3573 # then we really shouldn't go on 3574 new_members = [] 3575 failed = [] 3576 for item in tx_mbrs[:]: 3577 #FIXME future - if things in the rpm transaction handling get 3578 # a bit finer-grained, then we should allow reinstalls of kernels 3579 # for now, banned and dropped. 3580 if self.allowedMultipleInstalls(item.po): 3581 self.tsInfo.remove(item.pkgtup) 3582 tx_mbrs.remove(item) 3583 msg = _("Package %s is allowed multiple installs, skipping") % item.po 3584 self.verbose_logger.log(logginglevels.INFO_2, msg) 3585 continue 3586 3587 # Make sure obsoletes processing is off, so we can reinstall() 3588 # pkgs that are obsolete. 3589 old_conf_obs = self.conf.obsoletes 3590 self.conf.obsoletes = False 3591 if isinstance(po, YumLocalPackage): 3592 members = self.install(po=po) 3593 else: 3594 members = self.install(pkgtup=item.pkgtup) 3595 self.conf.obsoletes = old_conf_obs 3596 if len(members) == 0: 3597 self.tsInfo.remove(item.pkgtup) 3598 tx_mbrs.remove(item) 3599 failed.append(str(item.po)) 3600 continue 3601 new_members.extend(members) 3602 3603 if failed and not tx_mbrs: 3604 raise Errors.ReinstallInstallError, _("Problem in reinstall: no package %s matched to install") % ", ".join(failed) 3605 tx_mbrs.extend(new_members) 3606 return tx_mbrs
3607
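# Usage sketch for reinstall() (illustrative): 'zsh' is a placeholder name.
# The method marks the installed package for removal and the matching
# available package for installation in the same transaction.
#
#     yb = yum.YumBase()
#     txmbrs = yb.reinstall(name='zsh')   # or po=<available pkg>, or pkgtup=...
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()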
3608 - def downgradeLocal(self, pkg, po=None):
3609 """ 3610 handles downgrades of rpms provided on the filesystem in a 3611 local dir (ie: not from a repo) 3612 3613 Return the added transaction members. 3614 3615 @param pkg: a path to an rpm file on disk. 3616 @param po: A YumLocalPackage 3617 """ 3618 3619 if not po: 3620 try: 3621 po = YumUrlPackage(self, ts=self.rpmdb.readOnlyTS(), url=pkg, 3622 ua=default_grabber.opts.user_agent) 3623 except Errors.MiscError: 3624 self.logger.critical(_('Cannot open file: %s. Skipping.'), pkg) 3625 return [] 3626 self.verbose_logger.log(logginglevels.INFO_2, 3627 _('Examining %s: %s'), po.localpath, po) 3628 3629 if po.arch not in self.arch.archlist: 3630 self.logger.critical(_('Cannot add package %s to transaction. Not a compatible architecture: %s'), pkg, po.arch) 3631 return [] 3632 3633 # handle excludes for a local downgrade 3634 if self._is_local_exclude(po, [po]): 3635 self.verbose_logger.debug(_('Excluding %s'), po) 3636 return [] 3637 3638 return self.downgrade(po=po)
3639
3640 - def _is_local_exclude(self, po, pkglist):
3641 """returns True if the local pkg should be excluded""" 3642 3643 if "all" in self.conf.disable_excludes or \ 3644 "main" in self.conf.disable_excludes: 3645 return False 3646 3647 toexc = [] 3648 if len(self.conf.exclude) > 0: 3649 exactmatch, matched, unmatched = \ 3650 parsePackages(pkglist, self.conf.exclude, casematch=1) 3651 toexc = exactmatch + matched 3652 3653 if po in toexc: 3654 return True 3655 3656 return False
3657
3658 - def downgrade(self, po=None, **kwargs):
3659 """ Try to downgrade a package. Works like: 3660 % yum shell <<EOL 3661 remove abcd 3662 install abcd-<old-version> 3663 run 3664 EOL """ 3665 3666 if not po and not kwargs: 3667 raise Errors.DowngradeError, 'Nothing specified to remove' 3668 3669 doing_group_pkgs = False 3670 if po: 3671 apkgs = [po] 3672 elif 'pattern' in kwargs: 3673 if kwargs['pattern'][0] == '@': 3674 apkgs = self._at_groupdowngrade(kwargs['pattern']) 3675 doing_group_pkgs = True # Don't warn. about some things 3676 else: 3677 apkgs = self.pkgSack.returnPackages(patterns=[kwargs['pattern']], 3678 ignore_case=False) 3679 if not apkgs: 3680 arg = kwargs['pattern'] 3681 self.verbose_logger.debug(_('Checking for virtual provide or file-provide for %s'), 3682 arg) 3683 3684 try: 3685 apkgs = self.returnPackagesByDep(arg) 3686 except yum.Errors.YumBaseError, e: 3687 self.logger.critical(_('No Match for argument: %s') % arg) 3688 3689 else: 3690 nevra_dict = self._nevra_kwarg_parse(kwargs) 3691 apkgs = self.pkgSack.searchNevra(name=nevra_dict['name'], 3692 epoch=nevra_dict['epoch'], 3693 arch=nevra_dict['arch'], 3694 ver=nevra_dict['version'], 3695 rel=nevra_dict['release']) 3696 if not apkgs: 3697 # Do we still want to return errors here? 3698 # We don't in the cases below, so I didn't here... 3699 pkgs = [] 3700 if 'pattern' in kwargs: 3701 pkgs = self.rpmdb.returnPackages(patterns=[kwargs['pattern']], 3702 ignore_case=False) 3703 if 'name' in kwargs: 3704 pkgs = self.rpmdb.searchNevra(name=kwargs['name']) 3705 if pkgs: 3706 return [] 3707 raise Errors.DowngradeError, _('No package(s) available to downgrade') 3708 3709 warned_nas = set() 3710 # Skip kernel etc. 3711 tapkgs = [] 3712 for pkg in apkgs: 3713 if self.allowedMultipleInstalls(pkg): 3714 if (pkg.name, pkg.arch) not in warned_nas: 3715 msg = _("Package %s is allowed multiple installs, skipping") % pkg 3716 self.verbose_logger.log(logginglevels.INFO_2, msg) 3717 warned_nas.add((pkg.name, pkg.arch)) 3718 continue 3719 tapkgs.append(pkg) 3720 apkgs = tapkgs 3721 3722 # Find installed versions of "to downgrade pkgs" 3723 apkg_names = set() 3724 for pkg in apkgs: 3725 apkg_names.add(pkg.name) 3726 ipkgs = self.rpmdb.searchNames(list(apkg_names)) 3727 3728 latest_installed_na = {} 3729 latest_installed_n = {} 3730 for pkg in sorted(ipkgs): 3731 if (pkg.name not in latest_installed_n or 3732 pkg.verGT(latest_installed_n[pkg.name][0])): 3733 latest_installed_n[pkg.name] = [pkg] 3734 elif pkg.verEQ(latest_installed_n[pkg.name][0]): 3735 latest_installed_n[pkg.name].append(pkg) 3736 latest_installed_na[(pkg.name, pkg.arch)] = pkg 3737 3738 # Find "latest downgrade", ie. latest available pkg before 3739 # installed version. Indexed fromn the latest installed pkgtup. 3740 downgrade_apkgs = {} 3741 for pkg in sorted(apkgs): 3742 na = (pkg.name, pkg.arch) 3743 3744 # Here we allow downgrades from .i386 => .noarch, or .i586 => .i386 3745 # but not .i386 => .x86_64 (similar to update). 3746 lipkg = None 3747 if na in latest_installed_na: 3748 lipkg = latest_installed_na[na] 3749 elif pkg.name in latest_installed_n: 3750 for tlipkg in latest_installed_n[pkg.name]: 3751 if not canCoinstall(pkg.arch, tlipkg.arch): 3752 lipkg = tlipkg 3753 # Use this so we don't get confused when we have 3754 # different versions with different arches. 
3755 na = (pkg.name, lipkg.arch) 3756 break 3757 3758 if lipkg is None: 3759 if (na not in warned_nas and not doing_group_pkgs and 3760 pkg.name not in latest_installed_n): 3761 msg = _('No Match for available package: %s') % pkg 3762 self.logger.critical(msg) 3763 warned_nas.add(na) 3764 continue 3765 3766 if pkg.verGE(lipkg): 3767 if na not in warned_nas: 3768 msg = _('Only Upgrade available on package: %s') % pkg 3769 self.logger.critical(msg) 3770 warned_nas.add(na) 3771 continue 3772 3773 warned_nas.add(na) 3774 if (lipkg.pkgtup in downgrade_apkgs and 3775 pkg.verLE(downgrade_apkgs[lipkg.pkgtup])): 3776 continue # Skip older than "latest downgrade" 3777 downgrade_apkgs[lipkg.pkgtup] = pkg 3778 3779 tx_return = [] 3780 for ipkg in ipkgs: 3781 if ipkg.pkgtup not in downgrade_apkgs: 3782 continue 3783 txmbrs = self.tsInfo.addDowngrade(downgrade_apkgs[ipkg.pkgtup],ipkg) 3784 if not txmbrs: # Fail? 3785 continue 3786 self._add_prob_flags(rpm.RPMPROB_FILTER_OLDPACKAGE) 3787 tx_return.extend(txmbrs) 3788 3789 return tx_return
3790
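# Usage sketch for downgrade() (illustrative): the pattern is a placeholder
# and should match the older, still-available version.
#
#     yb = yum.YumBase()
#     txmbrs = yb.downgrade(pattern='foo-1.0*')   # or name=/po=/pkgtup= kwargs
#     if txmbrs:
#         yb.buildTransaction()
#         yb.processTransaction()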
3791 - def _nevra_kwarg_parse(self, kwargs):
3792 3793 returndict = {} 3794 3795 if 'pkgtup' in kwargs: 3796 (n, a, e, v, r) = kwargs['pkgtup'] 3797 returndict['name'] = n 3798 returndict['epoch'] = e 3799 returndict['arch'] = a 3800 returndict['version'] = v 3801 returndict['release'] = r 3802 return returndict 3803 3804 returndict['name'] = kwargs.get('name') 3805 returndict['epoch'] = kwargs.get('epoch') 3806 returndict['arch'] = kwargs.get('arch') 3807 # get them as ver, version and rel, release - if someone 3808 # specifies one of each then that's kinda silly. 3809 returndict['version'] = kwargs.get('version') 3810 if returndict['version'] is None: 3811 returndict['version'] = kwargs.get('ver') 3812 3813 returndict['release'] = kwargs.get('release') 3814 if returndict['release'] is None: 3815 returndict['release'] = kwargs.get('rel') 3816 3817 return returndict
3818
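# The kwargs accepted by _nevra_kwarg_parse() come in two forms (illustrative
# values); both return a dict with 'name', 'epoch', 'arch', 'version' and
# 'release' keys, with 'ver'/'rel' accepted as aliases:
#
#     self._nevra_kwarg_parse({'pkgtup': ('bash', 'x86_64', '0', '4.0', '1')})
#     self._nevra_kwarg_parse({'name': 'bash', 'ver': '4.0', 'rel': '1'})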
3819 - def history_redo(self, transaction):
3820 """ Given a valid historical transaction object, try and repeat 3821 that transaction. """ 3822 # NOTE: This is somewhat basic atm. ... see comment in undo. 3823 # Also note that redo doesn't force install Dep-Install packages, 3824 # which is probably what is wanted the majority of the time. 3825 old_conf_obs = self.conf.obsoletes 3826 self.conf.obsoletes = False 3827 done = False 3828 for pkg in transaction.trans_data: 3829 if pkg.state == 'Reinstall': 3830 if self.reinstall(pkgtup=pkg.pkgtup): 3831 done = True 3832 for pkg in transaction.trans_data: 3833 if pkg.state == 'Downgrade': 3834 try: 3835 if self.downgrade(pkgtup=pkg.pkgtup): 3836 done = True 3837 except yum.Errors.DowngradeError: 3838 self.logger.critical(_('Failed to downgrade: %s'), pkg) 3839 for pkg in transaction.trans_data: 3840 if pkg.state == 'Update': 3841 if self.update(pkgtup=pkg.pkgtup): 3842 done = True 3843 for pkg in transaction.trans_data: 3844 if pkg.state in ('Install', 'True-Install', 'Obsoleting'): 3845 if self.install(pkgtup=pkg.pkgtup): 3846 done = True 3847 for pkg in transaction.trans_data: 3848 if pkg.state == 'Erase': 3849 if self.remove(pkgtup=pkg.pkgtup): 3850 done = True 3851 self.conf.obsoletes = old_conf_obs 3852 return done
3853
3854 - def history_undo(self, transaction):
3855 """ Given a valid historical transaction object, try and undo 3856 that transaction. """ 3857 # NOTE: This is somewhat basic atm. ... for instance we don't check 3858 # that we are going from the old new version. However it's still 3859 # better than the RHN rollback code, and people pay for that :). 3860 # We turn obsoletes off because we want the specific versions of stuff 3861 # from history ... even if they've been obsoleted since then. 3862 old_conf_obs = self.conf.obsoletes 3863 self.conf.obsoletes = False 3864 done = False 3865 for pkg in transaction.trans_data: 3866 if pkg.state == 'Reinstall': 3867 if self.reinstall(pkgtup=pkg.pkgtup): 3868 done = True 3869 for pkg in transaction.trans_data: 3870 if pkg.state == 'Updated': 3871 try: 3872 if self.downgrade(pkgtup=pkg.pkgtup): 3873 done = True 3874 except yum.Errors.DowngradeError: 3875 self.logger.critical(_('Failed to downgrade: %s'), pkg) 3876 for pkg in transaction.trans_data: 3877 if pkg.state == 'Downgraded': 3878 if self.update(pkgtup=pkg.pkgtup): 3879 done = True 3880 for pkg in transaction.trans_data: 3881 if pkg.state == 'Obsoleting': 3882 if self.remove(pkgtup=pkg.pkgtup): 3883 done = True 3884 for pkg in transaction.trans_data: 3885 if pkg.state in ('Dep-Install', 'Install', 'True-Install'): 3886 if self.remove(pkgtup=pkg.pkgtup): 3887 done = True 3888 for pkg in transaction.trans_data: 3889 if pkg.state == 'Obsoleted': 3890 if self.install(pkgtup=pkg.pkgtup): 3891 done = True 3892 for pkg in transaction.trans_data: 3893 if pkg.state == 'Erase': 3894 if self.install(pkgtup=pkg.pkgtup): 3895 done = True 3896 self.conf.obsoletes = old_conf_obs 3897 return done
3898
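# Usage sketch for history_redo()/history_undo() (illustrative): assumes the
# history database is populated and that yb.history.old() returns the stored
# transaction objects for the given ids (42 is a placeholder id).
#
#     yb = yum.YumBase()
#     old = yb.history.old([42])
#     if old and yb.history_undo(old[0]):
#         yb.buildTransaction()
#         yb.processTransaction()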
3899 - def _retrievePublicKey(self, keyurl, repo=None):
3900 """ 3901 Retrieve a key file 3902 @param keyurl: url to the key to retrieve 3903 Returns a list of dicts with all the keyinfo 3904 """ 3905 key_installed = False 3906 3907 self.logger.info(_('Retrieving GPG key from %s') % keyurl) 3908 3909 # Go get the GPG key from the given URL 3910 try: 3911 url = misc.to_utf8(keyurl) 3912 if repo is None: 3913 rawkey = urlgrabber.urlread(url, limit=9999) 3914 else: 3915 # If we have a repo. use the proxy etc. configuration for it. 3916 # In theory we have a global proxy config. too, but meh... 3917 # external callers should just update. 3918 ug = URLGrabber(bandwidth = repo.bandwidth, 3919 retry = repo.retries, 3920 throttle = repo.throttle, 3921 progress_obj = repo.callback, 3922 proxies=repo.proxy_dict) 3923 ug.opts.user_agent = default_grabber.opts.user_agent 3924 rawkey = ug.urlread(url, text=repo.id + "/gpgkey") 3925 3926 except urlgrabber.grabber.URLGrabError, e: 3927 raise Errors.YumBaseError(_('GPG key retrieval failed: ') + 3928 to_unicode(str(e))) 3929 # Parse the key 3930 try: 3931 keys_info = misc.getgpgkeyinfo(rawkey, multiple=True) 3932 except ValueError, e: 3933 raise Errors.YumBaseError(_('Invalid GPG Key from %s: %s') % 3934 (url, to_unicode(str(e)))) 3935 keys = [] 3936 for keyinfo in keys_info: 3937 thiskey = {} 3938 for info in ('keyid', 'timestamp', 'userid', 3939 'fingerprint', 'raw_key'): 3940 if info not in keyinfo: 3941 raise Errors.YumBaseError, \ 3942 _('GPG key parsing failed: key does not have value %s') + info 3943 thiskey[info] = keyinfo[info] 3944 thiskey['hexkeyid'] = misc.keyIdToRPMVer(keyinfo['keyid']).upper() 3945 keys.append(thiskey) 3946 3947 return keys
3948
3949 - def getKeyForPackage(self, po, askcb = None, fullaskcb = None):
3950 """ 3951 Retrieve a key for a package. If needed, prompt for if the key should 3952 be imported using askcb. 3953 3954 @param po: Package object to retrieve the key of. 3955 @param askcb: Callback function to use for asking for verification. 3956 Takes arguments of the po, the userid for the key, and 3957 the keyid. 3958 @param fullaskcb: Callback function to use for asking for verification 3959 of a key. Differs from askcb in that it gets passed 3960 a dictionary so that we can expand the values passed. 3961 """ 3962 repo = self.repos.getRepo(po.repoid) 3963 keyurls = repo.gpgkey 3964 key_installed = False 3965 3966 ts = self.rpmdb.readOnlyTS() 3967 3968 for keyurl in keyurls: 3969 keys = self._retrievePublicKey(keyurl, repo) 3970 3971 for info in keys: 3972 # Check if key is already installed 3973 if misc.keyInstalled(ts, info['keyid'], info['timestamp']) >= 0: 3974 self.logger.info(_('GPG key at %s (0x%s) is already installed') % ( 3975 keyurl, info['hexkeyid'])) 3976 continue 3977 3978 # Try installing/updating GPG key 3979 self.logger.critical(_('Importing GPG key 0x%s "%s" from %s') % 3980 (info['hexkeyid'], 3981 to_unicode(info['userid']), 3982 keyurl.replace("file://",""))) 3983 rc = False 3984 if self.conf.assumeyes: 3985 rc = True 3986 elif fullaskcb: 3987 rc = fullaskcb({"po": po, "userid": info['userid'], 3988 "hexkeyid": info['hexkeyid'], 3989 "keyurl": keyurl, 3990 "fingerprint": info['fingerprint'], 3991 "timestamp": info['timestamp']}) 3992 elif askcb: 3993 rc = askcb(po, info['userid'], info['hexkeyid']) 3994 3995 if not rc: 3996 raise Errors.YumBaseError, _("Not installing key") 3997 3998 # Import the key 3999 result = ts.pgpImportPubkey(misc.procgpgkey(info['raw_key'])) 4000 if result != 0: 4001 raise Errors.YumBaseError, \ 4002 _('Key import failed (code %d)') % result 4003 self.logger.info(_('Key imported successfully')) 4004 key_installed = True 4005 4006 if not key_installed: 4007 raise Errors.YumBaseError, \ 4008 _('The GPG keys listed for the "%s" repository are ' \ 4009 'already installed but they are not correct for this ' \ 4010 'package.\n' \ 4011 'Check that the correct key URLs are configured for ' \ 4012 'this repository.') % (repo.name) 4013 4014 # Check if the newly installed keys helped 4015 result, errmsg = self.sigCheckPkg(po) 4016 if result != 0: 4017 self.logger.info(_("Import of key(s) didn't help, wrong key(s)?")) 4018 raise Errors.YumBaseError, errmsg
4019
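# Callback sketch for getKeyForPackage() (illustrative): askcb receives the
# package object, the key's userid and its hex keyid, and returns True to
# allow the import.
#
#     def askcb(po, userid, hexkeyid):
#         print "Import key 0x%s (%s) for %s?" % (hexkeyid, userid, po)
#         return True                      # a real callback should prompt
#
#     yb.getKeyForPackage(po, askcb=askcb)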
4020 - def getKeyForRepo(self, repo, callback=None):
4021 """ 4022 Retrieve a key for a repository If needed, prompt for if the key should 4023 be imported using callback 4024 4025 @param repo: Repository object to retrieve the key of. 4026 @param callback: Callback function to use for asking for verification 4027 of a key. Takes a dictionary of key info. 4028 """ 4029 keyurls = repo.gpgkey 4030 key_installed = False 4031 for keyurl in keyurls: 4032 keys = self._retrievePublicKey(keyurl, repo) 4033 for info in keys: 4034 # Check if key is already installed 4035 if info['keyid'] in misc.return_keyids_from_pubring(repo.gpgdir): 4036 self.logger.info(_('GPG key at %s (0x%s) is already imported') % ( 4037 keyurl, info['hexkeyid'])) 4038 continue 4039 4040 # Try installing/updating GPG key 4041 self.logger.critical(_('Importing GPG key 0x%s "%s" from %s') % 4042 (info['hexkeyid'], 4043 to_unicode(info['userid']), 4044 keyurl.replace("file://",""))) 4045 rc = False 4046 if self.conf.assumeyes: 4047 rc = True 4048 elif callback: 4049 rc = callback({"repo": repo, "userid": info['userid'], 4050 "hexkeyid": info['hexkeyid'], "keyurl": keyurl, 4051 "fingerprint": info['fingerprint'], 4052 "timestamp": info['timestamp']}) 4053 4054 4055 if not rc: 4056 raise Errors.YumBaseError, _("Not installing key for repo %s") % repo 4057 4058 # Import the key 4059 result = misc.import_key_to_pubring(info['raw_key'], info['hexkeyid'], gpgdir=repo.gpgdir) 4060 if not result: 4061 raise Errors.YumBaseError, _('Key import failed') 4062 self.logger.info(_('Key imported successfully')) 4063 key_installed = True 4064 4065 if not key_installed: 4066 raise Errors.YumBaseError, \ 4067 _('The GPG keys listed for the "%s" repository are ' \ 4068 'already installed but they are not correct for this ' \ 4069 'package.\n' \ 4070 'Check that the correct key URLs are configured for ' \ 4071 'this repository.') % (repo.name)
4072 4073
4074 - def _limit_installonly_pkgs(self):
4075          """ Limit packages based on conf.installonly_limit, if any of the 4076              packages being installed have a provide in conf.installonlypkgs. 4077              New in 3.2.24: Obey yumdb_info.installonly data. """ 4078   4079          def _sort_and_filter_installonly(pkgs): 4080              """ Allow the admin to specify some overrides for installonly pkgs 4081                  using the yumdb. """ 4082              ret_beg = [] 4083              ret_mid = [] 4084              ret_end = [] 4085              for pkg in sorted(pkgs): 4086                  if 'installonly' not in pkg.yumdb_info: 4087                      ret_mid.append(pkg) 4088                      continue 4089   4090                  if pkg.yumdb_info.installonly == 'keep': 4091                      continue 4092   4093                  if True: # Don't do magic sorting, yet 4094                      ret_mid.append(pkg) 4095                      continue 4096   4097                  if pkg.yumdb_info.installonly == 'remove-first': 4098                      ret_beg.append(pkg) 4099                  elif pkg.yumdb_info.installonly == 'remove-last': 4100                      ret_end.append(pkg) 4101                  else: 4102                      ret_mid.append(pkg) 4103   4104              return ret_beg + ret_mid + ret_end
4105 4106 if self.conf.installonly_limit < 1 : 4107 return 4108 4109 toremove = [] 4110 # We "probably" want to use either self.ts or self.rpmdb.ts if either 4111 # is available. However each ts takes a ref. on signals generally, and 4112 # SIGINT specifically, so we _must_ have got rid of all of the used tses 4113 # before we try downloading. This is called from buildTransaction() 4114 # so self.rpmdb.ts should be valid. 4115 ts = self.rpmdb.readOnlyTS() 4116 (cur_kernel_v, cur_kernel_r) = misc.get_running_kernel_version_release(ts) 4117 for instpkg in self.conf.installonlypkgs: 4118 for m in self.tsInfo.getMembers(): 4119 if (m.name == instpkg or instpkg in m.po.provides_names) \ 4120 and m.ts_state in ('i', 'u'): 4121 installed = self.rpmdb.searchNevra(name=m.name) 4122 installed = _sort_and_filter_installonly(installed) 4123 if len(installed) >= self.conf.installonly_limit - 1: # since we're adding one 4124 numleft = len(installed) - self.conf.installonly_limit + 1 4125 for po in installed: 4126 if (po.version, po.release) == (cur_kernel_v, cur_kernel_r): 4127 # don't remove running 4128 continue 4129 if numleft == 0: 4130 break 4131 toremove.append((po,m)) 4132 numleft -= 1 4133 4134 for po,rel in toremove: 4135 txmbr = self.tsInfo.addErase(po) 4136 # Add a dep relation to the new version of the package, causing this one to be erased 4137 # this way skipbroken, should clean out the old one, if the new one is skipped 4138 txmbr.depends_on.append(rel) 4139
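# Sketch of the configuration that drives _limit_installonly_pkgs()
# (illustrative values only):
#
#     yb = yum.YumBase()
#     print yb.conf.installonly_limit    # e.g. 3; values < 1 disable the pruning
#     print yb.conf.installonlypkgs      # e.g. ['kernel', 'kernel-devel', ...]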
4140 - def processTransaction(self, callback=None,rpmTestDisplay=None, rpmDisplay=None):
4141 ''' 4142 Process the current Transaction 4143 - Download Packages 4144 - Check GPG Signatures. 4145 - Run Test RPM Transaction 4146 - Run RPM Transaction 4147 4148 callback.event method is called at start/end of each process. 4149 4150 @param callback: callback object (must have an event method) 4151 @param rpmTestDisplay: Name of display class to use in RPM Test Transaction 4152 @param rpmDisplay: Name of display class to use in RPM Transaction 4153 ''' 4154 4155 if not callback: 4156 callback = callbacks.ProcessTransNoOutputCallback() 4157 4158 # Download Packages 4159 callback.event(callbacks.PT_DOWNLOAD) 4160 pkgs = self._downloadPackages(callback) 4161 # Check Package Signatures 4162 if pkgs != None: 4163 callback.event(callbacks.PT_GPGCHECK) 4164 self._checkSignatures(pkgs,callback) 4165 # Run Test Transaction 4166 callback.event(callbacks.PT_TEST_TRANS) 4167 self._doTestTransaction(callback,display=rpmTestDisplay) 4168 # Run Transaction 4169 callback.event(callbacks.PT_TRANSACTION) 4170 self._doTransaction(callback,display=rpmDisplay)
4171
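# Usage sketch for processTransaction() (illustrative): after the transaction
# has been populated and depsolved, one call downloads, gpg-checks, test-runs
# and runs it. 'zsh' is a placeholder name; a callback object with an event()
# method may be passed.
#
#     yb = yum.YumBase()
#     if yb.install(name='zsh'):
#         yb.buildTransaction()
#         yb.processTransaction()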
4172 - def _downloadPackages(self,callback):
4173          ''' Download the needed packages in the transaction ''' 4174          # This can be overloaded by a subclass. 4175          dlpkgs = map(lambda x: x.po, filter(lambda txmbr: 4176                                              txmbr.ts_state in ("i", "u"), 4177                                              self.tsInfo.getMembers())) 4178          # Check if there is something to do 4179          if len(dlpkgs) == 0: 4180              return None 4181          # make callback with packages to download 4182          callback.event(callbacks.PT_DOWNLOAD_PKGS,dlpkgs) 4183          try: 4184              probs = self.downloadPkgs(dlpkgs) 4185   4186          except IndexError: 4187              raise Errors.YumBaseError, [_("Unable to find a suitable mirror.")] 4188          if len(probs) > 0: 4189              errstr = [_("Errors were encountered while downloading packages.")] 4190              for key in probs: 4191                  errors = misc.unique(probs[key]) 4192                  for error in errors: 4193                      errstr.append("%s: %s" % (key, error)) 4194   4195              raise Errors.YumDownloadError, errstr 4196          return dlpkgs
4197
4198 - def _checkSignatures(self,pkgs,callback):
4199          ''' Check the signatures of the downloaded packages ''' 4200          # This can be overloaded by a subclass. 4201          for po in pkgs: 4202              result, errmsg = self.sigCheckPkg(po) 4203              if result == 0: 4204                  # Verified ok, or verify not req'd 4205                  continue 4206              elif result == 1: 4207                  self.getKeyForPackage(po, self._askForGPGKeyImport) 4208              else: 4209                  raise Errors.YumGPGCheckError, errmsg 4210   4211          return 0
4212
4213 - def _askForGPGKeyImport(self, po, userid, hexkeyid):
4214          ''' 4215          Ask for GPG key import. 4216          This needs to be overloaded in a subclass to make GPG key import work 4217          ''' 4218          return False
4219
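# Override sketch for _askForGPGKeyImport() (illustrative): an interactive
# subclass would prompt the user here instead of always refusing.
#
#     class MyYumBase(yum.YumBase):
#         def _askForGPGKeyImport(self, po, userid, hexkeyid):
#             # placeholder policy: accept every key; real code should prompt
#             return True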
4220 - def _doTestTransaction(self,callback,display=None):
4221 ''' Do the RPM test transaction ''' 4222 # This can be overloaded by a subclass. 4223 if self.conf.rpm_check_debug: 4224 self.verbose_logger.log(logginglevels.INFO_2, 4225 _('Running rpm_check_debug')) 4226 msgs = self._run_rpm_check_debug() 4227 if msgs: 4228 rpmlib_only = True 4229 for msg in msgs: 4230 if msg.startswith('rpmlib('): 4231 continue 4232 rpmlib_only = False 4233 if rpmlib_only: 4234 retmsgs = [_("ERROR You need to update rpm to handle:")] 4235 retmsgs.extend(msgs) 4236 raise Errors.YumRPMCheckError, retmsgs 4237 retmsgs = [_('ERROR with rpm_check_debug vs depsolve:')] 4238 retmsgs.extend(msgs) 4239 retmsgs.append(_('Please report this error at %s') 4240 % self.conf.bugtracker_url) 4241 raise Errors.YumRPMCheckError,retmsgs 4242 4243 tsConf = {} 4244 for feature in ['diskspacecheck']: # more to come, I'm sure 4245 tsConf[feature] = getattr( self.conf, feature ) 4246 # 4247 testcb = RPMTransaction(self, test=True) 4248 # overwrite the default display class 4249 if display: 4250 testcb.display = display 4251 # clean out the ts b/c we have to give it new paths to the rpms 4252 del self.ts 4253 4254 self.initActionTs() 4255 # save our dsCallback out 4256 dscb = self.dsCallback 4257 self.dsCallback = None # dumb, dumb dumb dumb! 4258 self.populateTs( keepold=0 ) # sigh 4259 tserrors = self.ts.test( testcb, conf=tsConf ) 4260 del testcb 4261 4262 if len( tserrors ) > 0: 4263 errstring = _('Test Transaction Errors: ') 4264 for descr in tserrors: 4265 errstring += ' %s\n' % descr 4266 raise Errors.YumTestTransactionError, errstring 4267 4268 del self.ts 4269 # put back our depcheck callback 4270 self.dsCallback = dscb
4271 4272
4273 - def _doTransaction(self,callback,display=None):
4274 ''' do the RPM Transaction ''' 4275 # This can be overloaded by a subclass. 4276 self.initActionTs() # make a new, blank ts to populate 4277 self.populateTs( keepold=0 ) # populate the ts 4278 self.ts.check() # required for ordering 4279 self.ts.order() # order 4280 cb = RPMTransaction(self,display=SimpleCliCallBack) 4281 # overwrite the default display class 4282 if display: 4283 cb.display = display 4284 self.runTransaction( cb=cb )
4285
4286 - def _run_rpm_check_debug(self):
4287 results = [] 4288 # save our dsCallback out 4289 dscb = self.dsCallback 4290 self.dsCallback = None # dumb, dumb dumb dumb! 4291 self.populateTs(test=1) 4292 self.ts.check() 4293 for prob in self.ts.problems(): 4294 # Newer rpm (4.8.0+) has problem objects, older have just strings. 4295 # Should probably move to using the new objects, when we can. For 4296 # now just be compatible. 4297 results.append(to_str(prob)) 4298 4299 self.dsCallback = dscb 4300 return results
4301
4302 - def add_enable_repo(self, repoid, baseurls=[], mirrorlist=None, **kwargs):
4303 """add and enable a repo with just a baseurl/mirrorlist and repoid 4304 requires repoid and at least one of baseurl and mirrorlist 4305 additional optional kwargs are: 4306 variable_convert=bool (defaults to true) 4307 and any other attribute settable to the normal repo setup 4308 ex: metadata_expire, enable_groups, gpgcheck, cachedir, etc 4309 returns the repo object it added""" 4310 # out of place fixme - maybe we should make this the default repo addition 4311 # routine and use it from getReposFromConfigFile(), etc. 4312 newrepo = yumRepo.YumRepository(repoid) 4313 newrepo.name = repoid 4314 newrepo.basecachedir = self.conf.cachedir 4315 var_convert = True 4316 if kwargs.has_key('variable_convert') and not kwargs['variable_convert']: 4317 var_convert = False 4318 4319 if baseurls: 4320 replaced = [] 4321 if var_convert: 4322 for baseurl in baseurls: 4323 if baseurl: 4324 replaced.append(varReplace(baseurl, self.conf.yumvar)) 4325 else: 4326 replaced = baseurls 4327 newrepo.baseurl = replaced 4328 4329 if mirrorlist: 4330 if var_convert: 4331 mirrorlist = varReplace(mirrorlist, self.conf.yumvar) 4332 newrepo.mirrorlist = mirrorlist 4333 4334 # setup the repo 4335 newrepo.setup(cache=self.conf.cache) 4336 4337 # some reasonable defaults, (imo) 4338 newrepo.enablegroups = True 4339 newrepo.metadata_expire = 0 4340 newrepo.gpgcheck = self.conf.gpgcheck 4341 newrepo.repo_gpgcheck = self.conf.repo_gpgcheck 4342 newrepo.basecachedir = self.conf.cachedir 4343 4344 for key in kwargs.keys(): 4345 if not hasattr(newrepo, key): continue # skip the ones which aren't vars 4346 setattr(newrepo, key, kwargs[key]) 4347 4348 # add the new repo 4349 self.repos.add(newrepo) 4350 # enable the main repo 4351 self.repos.enableRepo(newrepo.id) 4352 return newrepo
4353
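# Usage sketch for add_enable_repo() (illustrative): the repoid and URL are
# placeholders; $basearch is expanded via variable_convert.
#
#     yb = yum.YumBase()
#     repo = yb.add_enable_repo('myrepo',
#                               baseurls=['http://example.com/repo/$basearch/'],
#                               gpgcheck=False, metadata_expire=3600)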
4354 - def setCacheDir(self, force=False, tmpdir='/var/tmp', reuse=True, 4355 suffix='/$basearch/$releasever'):
4356 ''' Set a new cache dir, using misc.getCacheDir() and var. replace 4357 on suffix. ''' 4358 4359 if not force and os.geteuid() == 0: 4360 return True # We are root, not forced, so happy with the global dir. 4361 try: 4362 cachedir = misc.getCacheDir(tmpdir, reuse) 4363 except (IOError, OSError), e: 4364 self.logger.critical(_('Could not set cachedir: %s') % str(e)) 4365 cachedir = None 4366 4367 if cachedir is None: 4368 return False # Tried, but failed, to get a "user" cachedir 4369 4370 cachedir += varReplace(suffix, self.conf.yumvar) 4371 self.repos.setCacheDir(cachedir) 4372 self.rpmdb.setCacheDir(cachedir) 4373 self.conf.cachedir = cachedir 4374 return True # We got a new cache dir
4375
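# Usage sketch for setCacheDir() (illustrative): typically used by non-root
# API callers so metadata lands in a per-user directory under /var/tmp; one
# possible fallback on failure is to run from the existing cache only.
#
#     yb = yum.YumBase()
#     if not yb.setCacheDir():
#         yb.conf.cache = 1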
4376 - def _does_this_update(self, pkg1, pkg2):
4377          """returns True if pkg1 can update pkg2, False if not. 4378             This only checks whether it can be an update; it does not check 4379             whether it is obsoleting or anything else.""" 4380   4381          if pkg1.name != pkg2.name: 4382              return False 4383          if not pkg1.EVR > pkg2.EVR: 4384              return False 4385          if pkg1.arch not in self.arch.archlist: 4386              return False 4387          if rpmUtils.arch.canCoinstall(pkg1.arch, pkg2.arch): 4388              return False 4389          if self.allowedMultipleInstalls(pkg1): 4390              return False 4391   4392          return True
4393
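# Semantics sketch for _does_this_update() (illustrative; the package objects
# are placeholders): with matching names it is essentially an EVR comparison
# gated by arch compatibility and the installonly rules.
#
#     # True  : candidate_po has a newer EVR and a compatible (non-coinstallable) arch
#     # False : same/older EVR, foreign arch, or an installonly package
#     self._does_this_update(candidate_po, installed_po)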