Package entropy :: Package db :: Module sqlite

Source Code for Module entropy.db.sqlite

   1  # -*- coding: utf-8 -*- 
   2  """ 
   3   
   4      @author: Fabio Erculiani <[email protected]> 
   5      @contact: [email protected] 
   6      @copyright: Fabio Erculiani 
   7      @license: GPL-2 
   8   
   9      I{EntropySQLiteRepository} is the SQLite3 implementation of 
  10      the repository interface. 
  11   
  12  """ 
  13  import collections 
  14  import errno 
  15  import os 
  16  import hashlib 
  17  import time 
  18  try: 
  19      import thread 
  20  except ImportError: 
  21      import _thread as thread 
  22  import threading 
  23  import subprocess 
  24   
  25  from entropy.const import etpConst, const_convert_to_unicode, \ 
  26      const_get_buffer, const_convert_to_rawstring, const_pid_exists, \ 
  27      const_is_python3, const_debug_write, const_file_writable, \ 
  28      const_setup_directory, const_setup_file 
  29  from entropy.exceptions import SystemDatabaseError 
  30  from entropy.output import bold, red, blue, purple 
  31  from entropy.locks import ResourceLock 
  32   
  33  from entropy.db.exceptions import Warning, Error, InterfaceError, \ 
  34      DatabaseError, DataError, OperationalError, IntegrityError, \ 
  35      InternalError, ProgrammingError, NotSupportedError, LockAcquireError 
  36  from entropy.db.sql import EntropySQLRepository, SQLConnectionWrapper, \ 
  37      SQLCursorWrapper 
  38   
  39  from entropy.i18n import _ 
  40   
  41  import entropy.dep 
  42  import entropy.tools 
class SQLiteCursorWrapper(SQLCursorWrapper):

    """
    This class wraps a SQLite cursor in order to have
    it throw entropy.db.exceptions objects.
    The API is a subset of the one specified in
    Python DBAPI 2.0.
    """

    def __init__(self, cursor, exceptions):
        # NOTE(review): the original body line is missing from this
        # dump; presumably it just delegates to the base class, as
        # SQLiteConnectionWrapper does — confirm against upstream.
        SQLCursorWrapper.__init__(self, cursor, exceptions)

    def execute(self, *args, **kwargs):
        # Proxy to sqlite3 cursor.execute(), re-wrapping the returned
        # cursor so chained calls keep exception translation.
        cur = self._proxy_call(self._cur.execute, *args, **kwargs)
        return SQLiteCursorWrapper(cur, self._excs)

    def executemany(self, *args, **kwargs):
        # Same re-wrapping contract as execute().
        cur = self._proxy_call(self._cur.executemany, *args, **kwargs)
        return SQLiteCursorWrapper(cur, self._excs)

    def close(self, *args, **kwargs):
        return self._proxy_call(self._cur.close, *args, **kwargs)

    def fetchone(self, *args, **kwargs):
        return self._proxy_call(self._cur.fetchone, *args, **kwargs)

    def fetchall(self, *args, **kwargs):
        return self._proxy_call(self._cur.fetchall, *args, **kwargs)

    def fetchmany(self, *args, **kwargs):
        return self._proxy_call(self._cur.fetchmany, *args, **kwargs)

    def executescript(self, *args, **kwargs):
        return self._proxy_call(self._cur.executescript, *args, **kwargs)

    def callproc(self, *args, **kwargs):
        return self._proxy_call(self._cur.callproc, *args, **kwargs)

    def nextset(self, *args, **kwargs):
        return self._proxy_call(self._cur.nextset, *args, **kwargs)

    def __iter__(self):
        # Wrap the iterator too, so iteration raises translated
        # exceptions as well.
        cur = iter(self._cur)
        return SQLiteCursorWrapper(cur, self._excs)

    def __next__(self):
        # Python 3 iteration protocol.
        return self.wrap(next, self._cur)

    def next(self):
        # Python 2 iteration protocol (kept for py2 compatibility).
        return self.wrap(self._cur.next)
class SQLiteConnectionWrapper(SQLConnectionWrapper):
    """
    This class wraps a SQLite connection and
    makes execute(), executemany() return
    the connection itself.
    """

    def __init__(self, connection, exceptions):
        SQLConnectionWrapper.__init__(self, connection, exceptions)

    def ping(self):
        # SQLite connections are in-process; there is no link to probe.
        return

    def unicode(self):
        # Make the connection return unicode strings for TEXT columns.
        self._con.text_factory = const_convert_to_unicode

    def rawstring(self):
        # Make the connection return raw byte strings for TEXT columns.
        self._con.text_factory = const_convert_to_rawstring

    def interrupt(self):
        # Abort any query currently running on this connection,
        # translating sqlite3 exceptions via the proxy helper.
        return self._proxy_call(self._excs, self._con.interrupt)

    def _iterdump(self):
        # Yield the SQL text dump of the whole database (see
        # sqlite3.Connection.iterdump).
        return self._con.iterdump()
122 123 -class EntropySQLiteRepository(EntropySQLRepository):
124 125 """ 126 EntropySQLiteRepository implements SQLite3 based storage. 127 In a Model-View based pattern, it can be considered the "model". 128 Actually it's the only one available but more model backends will be 129 supported in future (which will inherit this class directly). 130 Beside the underlying SQLite3 calls are thread safe, you are responsible 131 of the semantic of your calls. 132 """ 133 134 # bump this every time schema changes and databaseStructureUpdate 135 # should be triggered 136 _SCHEMA_REVISION = 6 137 138 _INSERT_OR_REPLACE = "INSERT OR REPLACE" 139 _INSERT_OR_IGNORE = "INSERT OR IGNORE" 140 _UPDATE_OR_REPLACE = "UPDATE OR REPLACE" 141 _CACHE_SIZE = 8192 142 143 SETTING_KEYS = ("arch", "on_delete_cascade", "schema_revision", 144 "_baseinfo_extrainfo_2010") 145
    class SQLiteProxy(object):

        # Lazily-resolved sqlite3.dbapi2 module (shared, class-level).
        _mod = None
        # Exceptions namespace; for sqlite3 it is the dbapi2 module itself.
        _excs = None
        # Guards first-time module loading across threads.
        _lock = threading.Lock()

        @staticmethod
        def get():
            """
            Lazily load the SQLite3 module.
            """
            # Double-checked locking: fast path without the lock,
            # re-check under the lock before importing.
            if EntropySQLiteRepository.SQLiteProxy._mod is None:
                with EntropySQLiteRepository.SQLiteProxy._lock:
                    if EntropySQLiteRepository.SQLiteProxy._mod is None:
                        from sqlite3 import dbapi2
                        EntropySQLiteRepository.SQLiteProxy._excs = dbapi2
                        EntropySQLiteRepository.SQLiteProxy._mod = dbapi2
            return EntropySQLiteRepository.SQLiteProxy._mod

        @staticmethod
        def exceptions():
            """
            Get the SQLite3 exceptions module.
            """
            # get() is called for its side effect of populating _excs.
            _mod = EntropySQLiteRepository.SQLiteProxy.get()
            return EntropySQLiteRepository.SQLiteProxy._excs

        @staticmethod
        def errno():
            """
            Get the SQLite3 errno module (not avail).
            """
            raise NotImplementedError()

    ModuleProxy = SQLiteProxy
    def __init__(self, readOnly = False, dbFile = None, xcache = False,
                 name = None, indexing = True, skipChecks = False,
                 temporary = False, direct = False, cache_policy = None):
        """
        EntropySQLiteRepository constructor.

        @keyword readOnly: open file in read-only mode
        @type readOnly: bool
        @keyword dbFile: path to database to open
        @type dbFile: string
        @keyword xcache: enable on-disk cache
        @type xcache: bool
        @keyword name: repository identifier
        @type name: string
        @keyword indexing: enable database indexes
        @type indexing: bool
        @keyword skipChecks: if True, skip integrity checks
        @type skipChecks: bool
        @keyword temporary: if True, dbFile will be automatically removed
            on close()
        @type temporary: bool
        @keyword direct: True, if direct mode should be always enabled
        @type direct: bool
        @keyword cache_policy: set the cache policy that should be used
        @type cache_policy: EntropyRepositoryCachePolicies attribute
        """
        # rwsem state must exist before the superclass constructor runs.
        self._rwsem_lock = threading.RLock()
        self._rwsem = None

        # resolve the sqlite3 module lazily, via the proxy
        self._sqlite = self.ModuleProxy.get()

        EntropySQLRepository.__init__(
            self, dbFile, readOnly, skipChecks, indexing,
            xcache, temporary, name, direct=direct,
            cache_policy=cache_policy)

        if self._db is None:
            raise AttributeError("valid database path needed")

        # tracking mtime to validate repository Live cache as
        # well.
        try:
            self.__cur_mtime = self.mtime()
        except (OSError, IOError):
            # file may not exist yet; cache validation will cope
            self.__cur_mtime = None

        self._schema_update_run = False
        self._schema_update_lock = threading.Lock()

        if not self._skip_checks:

            if not entropy.tools.is_user_in_entropy_group():
                # forcing since we won't have write access to db
                self._indexing = False
            # live systems don't like wasting RAM
            if entropy.tools.islive() and not etpConst['systemroot']:
                self._indexing = False

            self._maybeDatabaseSchemaUpdates()
241
242 - def lock_path(self):
243 """ 244 Overridden from EntropyBaseRepository. 245 """ 246 if self._is_memory(): 247 return os.path.join( 248 etpConst['entropyrundir'], 249 "repository", 250 "%s_%s.lock" % ( 251 self.name, 252 os.getpid(), 253 ) 254 ) 255 return super(EntropySQLiteRepository, self).lock_path()
256
    def _maybeDatabaseSchemaUpdates(self):
        """
        Determine whether it is necessary to run a schema update.
        """
        # run at most once per instance
        if self._schema_update_run:
            return

        update = False
        if not self._skip_checks:

            def _is_avail():
                # in-memory repositories are always "writable"
                if self._is_memory():
                    return True
                return const_file_writable(self._db)

            try:
                # only consider repositories that already carry the
                # two core tables; brand new databases are initialized
                # elsewhere.
                if _is_avail() and self._doesTableExist('baseinfo') and \
                        self._doesTableExist('extrainfo'):

                    # NOTE(review): on live systems schema updates only
                    # run when a system root is set — presumably to
                    # avoid touching the live medium; confirm intent.
                    if entropy.tools.islive(): # this works
                        if etpConst['systemroot']:
                            update = True
                    else:
                        update = True

            except Error:
                # cannot even query the database: tear down pools
                # before propagating
                self._cleanup_all(_cleanup_main_thread=False)
                raise

        if update:
            with self._schema_update_lock:
                self._schema_update_run = True
                self._databaseSchemaUpdates()
290
291 - def _concatOperator(self, fields):
292 """ 293 Reimplemented from EntropySQLRepository. 294 """ 295 return " || ".join(fields)
296
    def _doesTableExist(self, table, temporary = False):
        """
        Return whether the given table exists in the database.

        @param table: table name
        @type table: string
        @keyword temporary: if True, probe temporary tables (bypasses
            the live cache)
        @type temporary: bool
        @return: True if the table exists
        @rtype: bool
        """
        # NOTE: override cache when temporary is True
        if temporary:
            # temporary table do not pop-up with the statement below, so
            # we need to handle them with "care"
            try:
                cur = self._cursor().execute("""
                SELECT count(*) FROM `%s` LIMIT 1""" % (table,))
                cur.fetchone()
            except OperationalError:
                return False
            return True

        # speed up a bit if we already reported a table as existing
        cached = self._getLiveCache("_doesTableExist")
        if cached is None:
            cached = {}
        elif table in cached:
            # avoid memleak with python3.x
            obj = cached[table]
            del cached
            return obj

        # NOTE: double-quoted "table" relies on SQLite's lenient
        # quoting (it falls back to a string literal here); standard
        # SQL would use single quotes.
        cur = self._cursor().execute("""
        SELECT name FROM SQLITE_MASTER WHERE type = "table" AND name = (?)
        LIMIT 1
        """, (table,))
        rslt = cur.fetchone()
        exists = rslt is not None

        cached[table] = exists
        self._setLiveCache("_doesTableExist", cached)
        # avoid python3.x memleak
        del cached

        return exists
334
    def _doesColumnInTableExist(self, table, column):
        """
        Return whether the given column exists in the given table,
        memoizing the outcome in the live cache.

        @param table: table name
        @type table: string
        @param column: column name
        @type column: string
        @return: True if the column exists
        @rtype: bool
        """
        # speed up a bit if we already reported a column as existing
        d_tup = (table, column,)
        cached = self._getLiveCache("_doesColumnInTableExist")
        if cached is None:
            cached = {}
        elif d_tup in cached:
            # avoid memleak with python3.x
            obj = cached[d_tup]
            del cached
            return obj

        # probe by selecting the column; OperationalError means it
        # (or the table) is missing
        try:
            self._cursor().execute("""
            SELECT `%s` FROM `%s` LIMIT 1
            """ % (column, table))
            exists = True
        except OperationalError:
            exists = False

        cached[d_tup] = exists
        self._setLiveCache("_doesColumnInTableExist", cached)
        # avoid python3.x memleak
        del cached

        return exists
362
363 - def readonly(self):
364 """ 365 Reimplemented from EntropySQLRepository. 366 """ 367 if (not self._readonly) and not self._is_memory(): 368 if os.getuid() != 0: 369 # make sure that user can write to file 370 # before returning False, override actual 371 # readonly status 372 return not const_file_writable(self._db) 373 return self._readonly
374
    def _cursor(self):
        """
        Reimplemented from EntropySQLRepository.

        Return the per-thread/per-database cursor, creating it (and
        its connection) on first use. New cursors get the mandatory
        SQLite pragmas applied.
        """
        current_thread = threading.current_thread()
        c_key = self._cursor_connection_pool_key()

        _init_db = False
        cursor = None
        with self._cursor_pool_mutex():
            threads = set()
            cursor_pool = self._cursor_pool()
            cursor_data = cursor_pool.get(c_key)
            if cursor_data is not None:
                cursor, threads = cursor_data
            # handle possible thread ident clashing
            # in the cleanup thread function, because
            # thread idents are recycled
            # on thread termination
            threads.add(current_thread)

            if cursor is None:
                conn = self._connection_impl(_from_cursor=True)
                cursor = SQLiteCursorWrapper(
                    conn.cursor(),
                    self.ModuleProxy.exceptions())
                # !!! enable foreign keys pragma !!! do not remove this
                # otherwise removePackage won't work properly
                cursor.execute("pragma foreign_keys = 1").fetchall()
                # setup temporary tables and indices storage
                # to in-memory value
                # http://www.sqlite.org/pragma.html#pragma_temp_store
                cursor.execute("pragma temp_store = 2").fetchall()
                cursor_pool[c_key] = cursor, threads
                self._start_cleanup_monitor(current_thread, c_key)
                _init_db = True
        # memory databases are critical because every new cursor brings
        # up a totally empty repository. So, enforce initialization.
        if _init_db and self._is_memory():
            self.initializeRepository()
        return cursor
416
    def _connection_impl(self, _from_cursor=False):
        """
        Connection getter method implementation, adds
        _from_cursor argument to avoid calling the
        cleanup routine if True.
        """
        current_thread = threading.current_thread()
        c_key = self._cursor_connection_pool_key()

        conn = None
        with self._connection_pool_mutex():
            threads = set()
            connection_pool = self._connection_pool()
            conn_data = connection_pool.get(c_key)
            if conn_data is not None:
                conn, threads = conn_data
            # handle possible thread ident clashing
            # in the cleanup thread function
            # because thread idents are recycled on
            # thread termination
            threads.add(current_thread)

            if conn is None:
                # check_same_thread still required for
                # conn.close() called from
                # arbitrary thread
                conn = SQLiteConnectionWrapper.connect(
                    self.ModuleProxy, self._sqlite,
                    SQLiteConnectionWrapper,
                    self._db, timeout=300.0,
                    check_same_thread=False)
                connection_pool[c_key] = conn, threads
                if not _from_cursor:
                    self._start_cleanup_monitor(current_thread, c_key)
        return conn
452
    def _connection(self):
        """
        Reimplemented from EntropySQLRepository.
        """
        # public connection accessor: always run the cleanup monitor
        return self._connection_impl()
458
459 - def __show_info(self):
460 first_part = "<EntropySQLiteRepository instance at %s, %s" % ( 461 hex(id(self)), self._db,) 462 second_part = ", ro: %s|%s, caching: %s, indexing: %s" % ( 463 self._readonly, self.readonly(), self.caching(), 464 self._indexing,) 465 third_part = ", name: %s, skip_upd: %s" % ( 466 self.name, self._skip_checks,) 467 fourth_part = ", conn_pool: %s, cursor_cache: %s>" % ( 468 self._connection_pool(), self._cursor_pool(),) 469 470 return first_part + second_part + third_part + fourth_part
471
    def __repr__(self):
        """Return the debug representation of this repository."""
        return self.__show_info()
474
    def __str__(self):
        """Return the string representation of this repository."""
        return self.__show_info()
477
    def __unicode__(self):
        """Python 2 unicode representation (same as __str__)."""
        return self.__show_info()
480
481 - def _setCacheSize(self, size):
482 """ 483 Change low-level, storage engine based cache size. 484 485 @param size: new size 486 @type size: int 487 """ 488 self._cursor().execute('PRAGMA cache_size = %s' % (size,))
489
490 - def _is_memory(self):
491 """ 492 Return True whether the database is stored in memory. 493 """ 494 return self._db == ":memory:"
495
496 - def _setDefaultCacheSize(self, size):
497 """ 498 Change default low-level, storage engine based cache size. 499 500 @param size: new default size 501 @type size: int 502 """ 503 self._cursor().execute('PRAGMA default_cache_size = %s' % (size,))
504
    def _getLiveCache(self, key):
        """
        Reimplemented from EntropySQLRepository.

        The whole live cache is invalidated whenever the database
        file mtime changed since the last check.
        """
        try:
            mtime = self.mtime()
        except (OSError, IOError):
            # file vanished or not yet created
            mtime = None
        if self.__cur_mtime != mtime:
            self.__cur_mtime = mtime
            self._discardLiveCache()
        return self._live_cacher.get(self._getLiveCacheKey() + key)
517
    def _get_reslock(self, mode):
        """
        Get the lock object used for locking.

        @param mode: True for exclusive mode, False for shared
        @type mode: bool
        @return: a RepositoryResourceLock, or a DirectFakeResourceLock
            no-op stand-in when the repository is in direct mode
        """

        class RepositoryResourceLock(ResourceLock):

            def __init__(self, repo, mode, path):
                super(RepositoryResourceLock, self).__init__(
                    output = repo)
                self._path = path
                self._mode = mode

            def path(self):
                """
                Overridden from ResourceLock.
                """
                return self._path

            def directed(self):
                """
                Return whether this lock has been created
                with direct mode enabled.
                """
                return False

        class DirectFakeResourceLock(object):
            # placeholder lock object: records the mode but never
            # touches the filesystem

            def __init__(self, mode):
                self._mode = mode

            def directed(self):
                """
                Return whether this lock has been created
                with direct mode enabled.
                """
                return True

        if self.directed():
            return DirectFakeResourceLock(mode)
        else:
            return RepositoryResourceLock(self, mode, self.lock_path())
561 - def acquire_shared(self):
562 """ 563 Reimplemented from EntropyBaseRepository. 564 """ 565 lock = self._get_reslock(False) 566 if lock.directed(): 567 return lock 568 569 already_acquired = lock.is_already_acquired() 570 try: 571 lock.acquire_shared() 572 except OSError as err: 573 raise LockAcquireError(err) 574 575 if not already_acquired: 576 # in-RAM cached data may have become stale 577 if not self._is_memory(): 578 self.clearCache() 579 580 return lock
581
582 - def try_acquire_shared(self):
583 """ 584 Reimplemented from EntropyBaseRepository. 585 """ 586 lock = self._get_reslock(False) 587 if lock.directed(): 588 return lock 589 590 already_acquired = lock.is_already_acquired() 591 592 try: 593 acquired = lock.try_acquire_shared() 594 except OSError as err: 595 raise LockAcquireError(err) 596 597 if acquired: 598 if not already_acquired: 599 # in-RAM cached data may have become stale 600 if not self._is_memory(): 601 self.clearCache() 602 return lock 603 else: 604 return None
605
606 - def acquire_exclusive(self):
607 """ 608 Reimplemented from EntropyBaseRepository. 609 """ 610 lock = self._get_reslock(True) 611 if lock.directed(): 612 return lock 613 614 already_acquired = lock.is_already_acquired() 615 try: 616 lock.acquire_exclusive() 617 except OSError as err: 618 raise LockAcquireError(err) 619 620 if not already_acquired: 621 # in-RAM cached data may have become stale 622 if not self._is_memory(): 623 self.clearCache() 624 return lock
625
626 - def try_acquire_exclusive(self):
627 """ 628 Reimplemented from EntropyBaseRepository. 629 """ 630 lock = self._get_reslock(True) 631 if lock.directed(): 632 return lock 633 634 already_acquired = lock.is_already_acquired() 635 try: 636 acquired = lock.try_acquire_exclusive() 637 except OSError as err: 638 raise LockAcquireError(err) 639 640 if acquired: 641 if not already_acquired: 642 # in-RAM cached data may have become stale 643 if not self._is_memory(): 644 self.clearCache() 645 return lock
646
    def _release_reslock(self, lock, mode):
        """
        Release the resource associated with the RepositoryResourceLock object.

        @param lock: object previously returned by _get_reslock()
        @param mode: True for exclusive, False for shared; must match
            the mode the lock was acquired with
        @raise RuntimeError: on acquire/release mode mismatch, or when
            a direct-mode lock is released on a non-direct repository
        """
        if lock._mode != mode:
            raise RuntimeError(
                "Programming error: acquired lock in a different mode")

        if lock.directed():
            if not self.directed():
                raise RuntimeError(
                    "Programming error: acquired lock in directed mode")
            # direct mode: nothing was really acquired
            return

        lock.release()
662
663 - def release_shared(self, opaque):
664 """ 665 Reimplemented from EntropyBaseRepository. 666 """ 667 self.commit() 668 669 self._release_reslock(opaque, False)
670
671 - def release_exclusive(self, opaque):
672 """ 673 Reimplemented from EntropyBaseRepository. 674 """ 675 self.commit() 676 677 self._release_reslock(opaque, True)
678
    def close(self, safe=False):
        """
        Reimplemented from EntropySQLRepository.
        Needs to call superclass method.

        @keyword safe: if True, do not clean up resources bound to the
            main thread
        @type safe: bool
        """
        super(EntropySQLiteRepository, self).close(safe=safe)

        self._cleanup_all(_cleanup_main_thread=not safe)
        if self._temporary and (not self._is_memory()) and \
                os.path.isfile(self._db):
            try:
                os.remove(self._db)
            except (OSError, IOError,):
                # best-effort removal of the temporary database file
                pass
        # live cache must be discarded every time the repository is closed
        # in order to avoid data mismatches for long-running processes
        # that load and unload Entropy Framework often.
        # like "client-updates-daemon".
        self._discardLiveCache()
698
    def vacuum(self):
        """
        Reimplemented from EntropySQLRepository.
        Rebuild the database file, reclaiming unused space.
        """
        self._cursor().execute("vacuum")
704
    def initializeRepository(self):
        """
        Reimplemented from EntropySQLRepository.

        Drop every existing table and index, recreate the schema from
        scratch and apply the initial settings and pending schema
        updates.
        """
        my = self.Schema()
        self.dropAllIndexes()
        for table in self._listAllTables():
            try:
                self._cursor().execute("DROP TABLE %s" % (table,))
            except OperationalError:
                # skip tables that can't be dropped
                continue
        self._cursor().executescript(my.get_init())
        self.commit()
        # table/column existence caches are now stale
        self._clearLiveCache("_doesTableExist")
        self._clearLiveCache("_doesColumnInTableExist")
        self._setupInitialSettings()
        # set cache size
        self._setCacheSize(self._CACHE_SIZE)
        self._setDefaultCacheSize(self._CACHE_SIZE)
        self._databaseSchemaUpdates()

        self.commit()
        # schema updates may have added tables/columns as well
        self._clearLiveCache("_doesTableExist")
        self._clearLiveCache("_doesColumnInTableExist")
        super(EntropySQLiteRepository, self).initializeRepository()
731
    def handlePackage(self, pkg_data, revision = None,
                      formattedContent = False):
        """
        Reimplemented from EntropySQLRepository.

        Not implemented at this layer; concrete repository classes
        are expected to provide package handling semantics.

        @raise NotImplementedError: always
        """
        raise NotImplementedError()
738
    def _removePackage(self, package_id, from_add_package = False):
        """
        Reimplemented from EntropySQLRepository.
        We must handle on_delete_cascade.
        """
        try:
            new_way = self.getSetting("on_delete_cascade")
        except KeyError:
            new_way = ''
        # TODO: remove this before 31-12-2011 (deprecate)
        if new_way:
            # this will work thanks to ON DELETE CASCADE !
            self._cursor().execute(
                "DELETE FROM baseinfo WHERE idpackage = (?)", (package_id,))
        else:
            # legacy repositories without cascading foreign keys:
            # delete from every per-package table explicitly
            r_tup = (package_id,)*20
            self._cursor().executescript("""
                DELETE FROM baseinfo WHERE idpackage = %d;
                DELETE FROM extrainfo WHERE idpackage = %d;
                DELETE FROM dependencies WHERE idpackage = %d;
                DELETE FROM provide WHERE idpackage = %d;
                DELETE FROM conflicts WHERE idpackage = %d;
                DELETE FROM configprotect WHERE idpackage = %d;
                DELETE FROM configprotectmask WHERE idpackage = %d;
                DELETE FROM sources WHERE idpackage = %d;
                DELETE FROM useflags WHERE idpackage = %d;
                DELETE FROM keywords WHERE idpackage = %d;
                DELETE FROM content WHERE idpackage = %d;
                DELETE FROM counters WHERE idpackage = %d;
                DELETE FROM sizes WHERE idpackage = %d;
                DELETE FROM triggers WHERE idpackage = %d;
                DELETE FROM systempackages WHERE idpackage = %d;
                DELETE FROM injected WHERE idpackage = %d;
                DELETE FROM installedtable WHERE idpackage = %d;
                DELETE FROM packagedesktopmime WHERE idpackage = %d;
                DELETE FROM provided_mime WHERE idpackage = %d;
            """ % r_tup)
            # Added on Aug. 2011
            if self._doesTableExist("packagedownloads"):
                self._cursor().execute("""
                DELETE FROM packagedownloads WHERE idpackage = (?)""",
                (package_id,))

            # Added on Sept. 2014
            if self._doesTableExist("needed_libs"):
                self._cursor().execute(
                    "DELETE FROM needed_libs WHERE idpackage = (?)",
                    (package_id,))
            else:
                self._cursor().execute(
                    "DELETE FROM needed WHERE idpackage = (?)",
                    (package_id,))
791
792 - def _addDependency(self, dependency):
793 """ 794 Reimplemented from EntropySQLRepository. 795 """ 796 self._clearLiveCache("retrieveDependencies") 797 return super(EntropySQLiteRepository, self)._addDependency( 798 dependency)
799
800 - def _addCategory(self, category):
801 """ 802 Reimplemented from EntropySQLRepository. 803 """ 804 self._clearLiveCache("retrieveCategory") 805 self._clearLiveCache("searchNameCategory") 806 self._clearLiveCache("retrieveKeySlot") 807 self._clearLiveCache("retrieveKeySplit") 808 self._clearLiveCache("searchKeySlot") 809 self._clearLiveCache("searchKeySlotTag") 810 self._clearLiveCache("retrieveKeySlotAggregated") 811 self._clearLiveCache("getStrictData") 812 return super(EntropySQLiteRepository, self)._addCategory(category)
813
    def setCategory(self, package_id, category):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010 and live cache.
        """
        if self._isBaseinfoExtrainfo2010():
            # modern schema: category is a plain column of baseinfo
            self._cursor().execute("""
            UPDATE baseinfo SET category = (?) WHERE idpackage = (?)
            """, (category, package_id,))
        else:
            # create new category if it doesn't exist
            catid = self._isCategoryAvailable(category)
            if catid == -1:
                # create category
                catid = self._addCategory(category)
            self._cursor().execute("""
            UPDATE baseinfo SET idcategory = (?) WHERE idpackage = (?)
            """, (catid, package_id,))

        # invalidate every memoized query that embeds category data
        self._clearLiveCache("retrieveCategory")
        self._clearLiveCache("searchNameCategory")
        self._clearLiveCache("retrieveKeySlot")
        self._clearLiveCache("retrieveKeySplit")
        self._clearLiveCache("searchKeySlot")
        self._clearLiveCache("searchKeySlotTag")
        self._clearLiveCache("retrieveKeySlotAggregated")
        self._clearLiveCache("getStrictData")
841
842 - def setName(self, package_id, name):
843 """ 844 Reimplemented from EntropySQLRepository. 845 We must handle live cache. 846 """ 847 super(EntropySQLiteRepository, self).setName(package_id, name) 848 self._clearLiveCache("searchNameCategory") 849 self._clearLiveCache("retrieveKeySlot") 850 self._clearLiveCache("retrieveKeySplit") 851 self._clearLiveCache("searchKeySlot") 852 self._clearLiveCache("searchKeySlotTag") 853 self._clearLiveCache("retrieveKeySlotAggregated") 854 self._clearLiveCache("getStrictData")
855
856 - def setDependency(self, iddependency, dependency):
857 """ 858 Reimplemented from EntropySQLRepository. 859 We must handle live cache. 860 """ 861 super(EntropySQLiteRepository, self).setDependency( 862 iddependency, dependency) 863 self._clearLiveCache("retrieveDependencies")
864
865 - def setAtom(self, package_id, atom):
866 """ 867 Reimplemented from EntropySQLRepository. 868 We must handle live cache. 869 """ 870 super(EntropySQLiteRepository, self).setAtom(package_id, atom) 871 self._clearLiveCache("searchNameCategory") 872 self._clearLiveCache("getStrictScopeData") 873 self._clearLiveCache("getStrictData")
874
875 - def setSlot(self, package_id, slot):
876 """ 877 Reimplemented from EntropySQLRepository. 878 We must handle live cache. 879 """ 880 super(EntropySQLiteRepository, self).setSlot(package_id, slot) 881 self._clearLiveCache("retrieveSlot") 882 self._clearLiveCache("retrieveKeySlot") 883 self._clearLiveCache("searchKeySlot") 884 self._clearLiveCache("searchKeySlotTag") 885 self._clearLiveCache("retrieveKeySlotAggregated") 886 self._clearLiveCache("getStrictScopeData") 887 self._clearLiveCache("getStrictData")
888
889 - def setRevision(self, package_id, revision):
890 """ 891 Reimplemented from EntropySQLRepository. 892 We must handle live cache. 893 """ 894 super(EntropySQLiteRepository, self).setRevision( 895 package_id, revision) 896 self._clearLiveCache("retrieveRevision") 897 self._clearLiveCache("getVersioningData") 898 self._clearLiveCache("getStrictScopeData") 899 self._clearLiveCache("getStrictData")
900
901 - def removeDependencies(self, package_id):
902 """ 903 Reimplemented from EntropySQLRepository. 904 We must handle live cache. 905 """ 906 super(EntropySQLiteRepository, self).removeDependencies( 907 package_id) 908 self._clearLiveCache("retrieveDependencies")
909
910 - def insertDependencies(self, package_id, depdata):
911 """ 912 Reimplemented from EntropySQLRepository. 913 We must handle live cache. 914 """ 915 super(EntropySQLiteRepository, self).insertDependencies( 916 package_id, depdata) 917 self._clearLiveCache("retrieveDependencies")
918
919 - def _insertNeededLibs(self, package_id, needed_libs):
920 """ 921 Reimplemented from EntropySQLRepository. 922 We must handle backward compatibility. 923 """ 924 try: 925 # be optimistic and delay if condition 926 super(EntropySQLiteRepository, self)._insertNeededLibs( 927 package_id, needed_libs) 928 except OperationalError as err: 929 if self._doesTableExist("needed_libs"): 930 raise 931 self._migrateNeededLibs() 932 super(EntropySQLiteRepository, self)._insertNeededLibs( 933 package_id, needed_libs)
934
935 - def _insertUseflags(self, package_id, useflags):
936 """ 937 Reimplemented from EntropySQLRepository. 938 We must handle live cache. 939 """ 940 super(EntropySQLiteRepository, self)._insertUseflags( 941 package_id, useflags) 942 self._clearLiveCache("retrieveUseflags")
943
944 - def _insertExtraDownload(self, package_id, package_downloads_data):
945 """ 946 Reimplemented from EntropySQLRepository. 947 We must handle backward compatibility. 948 """ 949 try: 950 # be optimistic and delay if condition 951 super(EntropySQLiteRepository, self)._insertExtraDownload( 952 package_id, package_downloads_data) 953 except OperationalError as err: 954 if self._doesTableExist("packagedownloads"): 955 raise 956 self._createPackageDownloadsTable() 957 super(EntropySQLiteRepository, self)._insertExtraDownload( 958 package_id, package_downloads_data)
959
960 - def listAllPreservedLibraries(self):
961 """ 962 Reimplemented from EntropySQLRepository. 963 """ 964 try: 965 return super(EntropySQLiteRepository, 966 self).listAllPreservedLibraries() 967 except OperationalError: 968 # TODO: backward compatibility, remove after 2014 969 if self._doesTableExist("preserved_libs"): 970 raise 971 return tuple()
972
973 - def retrievePreservedLibraries(self, library, elfclass):
974 """ 975 Reimplemented from EntropySQLRepository. 976 """ 977 try: 978 return super(EntropySQLiteRepository, 979 self).retrievePreservedLibraries(library, elfclass) 980 except OperationalError: 981 # TODO: backward compatibility, remove after 2014 982 if self._doesTableExist("preserved_libs"): 983 raise 984 return tuple()
985
    def _bindSpmPackageUid(self, package_id, spm_package_uid, branch):
        """
        Reimplemented from EntropySQLRepository.
        We must handle backward compatibility.
        """
        try:
            return super(EntropySQLiteRepository,
                         self)._bindSpmPackageUid(
                package_id, spm_package_uid, branch)
        except IntegrityError:
            # we have a PRIMARY KEY we need to remove
            self._migrateCountersTable()
            return super(EntropySQLiteRepository,
                         self)._bindSpmPackageUid(
                package_id, spm_package_uid, branch)
1001
    def _cleanupChangelogs(self):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        """
        if self._isBaseinfoExtrainfo2010():
            return super(EntropySQLiteRepository,
                         self)._cleanupChangelogs()

        # backward compatibility
        # old schema keeps category in a separate table, so the
        # "category/name" key must be rebuilt via the join below
        self._cursor().execute("""
        DELETE FROM packagechangelogs
        WHERE category || "/" || name NOT IN
        (SELECT categories.category || "/" || baseinfo.name
         FROM baseinfo, categories
         WHERE baseinfo.idcategory = categories.idcategory)
        """)
1019
1020 - def _cleanupDependencies(self):
1021 """ 1022 Reimplemented from EntropySQLRepository. 1023 We must handle live cache. 1024 """ 1025 super(EntropySQLiteRepository, self)._cleanupDependencies() 1026 self._clearLiveCache("retrieveDependencies")
1027
    def getVersioningData(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must use the in-memory cache to do some memoization.
        """
        if self.directed() or self.cache_policy_none():
            return super(EntropySQLiteRepository, self).getVersioningData(
                package_id)

        cached = self._getLiveCache("getVersioningData")
        if cached is None:
            # populate the whole-table memo in one query
            cur = self._cursor().execute("""
            SELECT idpackage, version, versiontag, revision FROM baseinfo
            """)
            cached = dict((pkg_id, (ver, tag, rev)) for pkg_id, ver, tag,
                rev in cur)
            self._setLiveCache("getVersioningData", cached)
        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1049
    def getStrictData(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must use the in-memory cache to do some memoization.
        """
        if self.directed() or self.cache_policy_none():
            return super(EntropySQLiteRepository, self).getStrictData(
                package_id)

        cached = self._getLiveCache("getStrictData")
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                cur = self._cursor().execute("""
                SELECT idpackage, category || "/" || name, slot, version,
                    versiontag, revision, atom FROM baseinfo
                """)
            else:
                # we must guarantee backward compatibility
                cur = self._cursor().execute("""
                SELECT baseinfo.idpackage, categories.category || "/" ||
                    baseinfo.name, baseinfo.slot, baseinfo.version,
                    baseinfo.versiontag, baseinfo.revision, baseinfo.atom
                FROM baseinfo, categories
                WHERE baseinfo.idcategory = categories.idcategory
                """)
            cached = dict((pkg_id, (key, slot, version, tag, rev, atom)) \
                for pkg_id, key, slot, version, tag, \
                rev, atom in cur)
            self._setLiveCache("getStrictData", cached)

        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1084
    def getStrictScopeData(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must use the in-memory cache to do some memoization.

        @param package_id: package identifier
        @type package_id: int
        @return: (atom, slot, revision) for package_id, or None if
            package_id is unknown
        @rtype: tuple or None
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).getStrictScopeData(
                package_id)

        cached = self._getLiveCache("getStrictScopeData")
        if cached is None:
            # cache miss: memoize the whole table in one query
            cur = self._cursor().execute("""
            SELECT idpackage, atom, slot, revision FROM baseinfo
            """)
            cached = dict((pkg_id, (atom, slot, rev)) for pkg_id, \
                atom, slot, rev in cur)
            self._setLiveCache("getStrictScopeData", cached)
        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1106
    def getScopeData(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must handle backward compatibility.

        @param package_id: package identifier
        @type package_id: int
        @return: the scope row for package_id, or None if not found
        @rtype: tuple or None
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository, self).getScopeData(
                package_id)

        # we must guarantee backward compatibility
        cur = self._cursor().execute("""
        SELECT
            baseinfo.atom,
            categories.category,
            baseinfo.name,
            baseinfo.version,
            baseinfo.slot,
            baseinfo.versiontag,
            baseinfo.revision,
            baseinfo.branch,
            baseinfo.etpapi
        FROM
            baseinfo,
            categories
        WHERE
            baseinfo.idpackage = (?)
            and baseinfo.idcategory = categories.idcategory
        LIMIT 1
        """, (package_id,))
        return cur.fetchone()
1137
    def getBaseData(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must handle backward compatibility.

        @param package_id: package identifier
        @type package_id: int
        @return: the base metadata row for package_id (atom, name,
            version, versiontag, description, category, chost, cflags,
            cxxflags, homepage, license, branch, download, digest, slot,
            etpapi, datecreation, size, revision), or None if not found
        @rtype: tuple or None
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository, self).getBaseData(
                package_id)

        # we must guarantee backward compatibility
        # (old schema keeps category/flags/license in separate tables)
        sql = """
        SELECT
            baseinfo.atom,
            baseinfo.name,
            baseinfo.version,
            baseinfo.versiontag,
            extrainfo.description,
            categories.category,
            flags.chost,
            flags.cflags,
            flags.cxxflags,
            extrainfo.homepage,
            licenses.license,
            baseinfo.branch,
            extrainfo.download,
            extrainfo.digest,
            baseinfo.slot,
            baseinfo.etpapi,
            extrainfo.datecreation,
            extrainfo.size,
            baseinfo.revision
        FROM
            baseinfo,
            extrainfo,
            categories,
            flags,
            licenses
        WHERE
            baseinfo.idpackage = (?)
            and baseinfo.idpackage = extrainfo.idpackage
            and baseinfo.idcategory = categories.idcategory
            and extrainfo.idflags = flags.idflags
            and baseinfo.idlicense = licenses.idlicense
        LIMIT 1
        """
        cur = self._cursor().execute(sql, (package_id,))
        return cur.fetchone()
1185
1186 - def retrieveDigest(self, package_id):
1187 """ 1188 Reimplemented from EntropySQLRepository. 1189 We must use the in-memory cache to do some memoization. 1190 """ 1191 if self.directed() or self.cache_policy_none(): 1192 return super(EntropySQLiteRepository, self).retrieveDigest( 1193 package_id) 1194 1195 cached = self._getLiveCache("retrieveDigest") 1196 if cached is None: 1197 cur = self._cursor().execute(""" 1198 SELECT idpackage, digest FROM extrainfo 1199 """) 1200 cached = dict(cur) 1201 self._setLiveCache("retrieveDigest", cached) 1202 # avoid python3.x memleak 1203 obj = cached.get(package_id) 1204 del cached 1205 return obj
1206
1207 - def retrieveExtraDownload(self, package_id, down_type = None):
1208 """ 1209 Reimplemented from EntropySQLRepository. 1210 We must handle backward compatibility. 1211 """ 1212 try: 1213 return super(EntropySQLiteRepository, 1214 self).retrieveExtraDownload( 1215 package_id, down_type = down_type) 1216 except OperationalError: 1217 if self._doesTableExist("packagedownloads"): 1218 raise 1219 return tuple()
1220
    def retrieveKeySplit(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must use the in-memory cache to do some memoization.
        We must handle _baseinfo_extrainfo_2010.

        @param package_id: package identifier
        @type package_id: int
        @return: (category, name) for package_id, or None if unknown
        @rtype: tuple or None
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).retrieveKeySplit(
                package_id)

        cached = self._getLiveCache("retrieveKeySplit")
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT idpackage, category, name FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT baseinfo.idpackage, categories.category,
                baseinfo.name
                FROM baseinfo, categories
                WHERE categories.idcategory = baseinfo.idcategory
                """)
            cached = dict((pkg_id, (category, name)) for pkg_id, category,
                name in cur)
            self._setLiveCache("retrieveKeySplit", cached)

        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1252
    def retrieveKeySlot(self, package_id):
        """
        Reimplemented from EntropyRepositoryBase.
        We must use the in-memory cache to do some memoization.
        We must handle _baseinfo_extrainfo_2010.

        @param package_id: package identifier
        @type package_id: int
        @return: ("category/name", slot) for package_id, or None
        @rtype: tuple or None
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).retrieveKeySlot(
                package_id)

        cached = self._getLiveCache("retrieveKeySlot")
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT idpackage, category || "/" || name,
                slot FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT baseinfo.idpackage,
                categories.category || "/" || baseinfo.name,
                baseinfo.slot
                FROM baseinfo, categories
                WHERE baseinfo.idcategory = categories.idcategory
                """)
            cached = dict((pkg_id, (key, slot)) for pkg_id, key, slot in \
                cur)
            self._setLiveCache("retrieveKeySlot", cached)

        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1286
    def retrieveKeySlotAggregated(self, package_id):
        """
        Reimplemented from EntropyRepositoryBase.

        @param package_id: package identifier
        @type package_id: int
        @return: "category/name<slotprefix>slot" string for package_id,
            or None if package_id is unknown
        @rtype: string or None
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository,
                         self).retrieveKeySlotAggregated(package_id)

        cached = self._getLiveCache("retrieveKeySlotAggregated")
        if cached is None:
            # NOTE: the slot separator is a trusted constant, so the
            # %-interpolation into the SQL text is safe here
            if self._isBaseinfoExtrainfo2010():
                cur = self._cursor().execute("""
                SELECT idpackage, category || "/" || name || "%s" || slot
                FROM baseinfo
                """ % (etpConst['entropyslotprefix'],))
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT baseinfo.idpackage, categories.category || "/" ||
                baseinfo.name || "%s" || baseinfo.slot
                FROM baseinfo, categories
                WHERE baseinfo.idcategory = categories.idcategory
                """ % (etpConst['entropyslotprefix'],))
            cached = dict((pkg_id, key) for pkg_id, key in cur.fetchall())
            self._setLiveCache("retrieveKeySlotAggregated", cached)

        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1316
    def retrieveKeySlotTag(self, package_id):
        """
        Reimplemented from EntropyRepositoryBase.

        @param package_id: package identifier
        @type package_id: int
        @return: ("category/name", slot, versiontag) row, or None
        @rtype: tuple or None
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: category is a column of baseinfo
            cur = self._cursor().execute("""
            SELECT category || "/" || name, slot,
            versiontag FROM baseinfo WHERE
            idpackage = (?) LIMIT 1
            """, (package_id,))
        else:
            # old schema: join against the categories table
            cur = self._cursor().execute("""
            SELECT categories.category || "/" || baseinfo.name,
            baseinfo.slot, baseinfo.versiontag
            FROM baseinfo, categories WHERE
            baseinfo.idpackage = (?) AND
            baseinfo.idcategory = categories.idcategory LIMIT 1
            """, (package_id,))
        return cur.fetchone()
1336
1337 - def retrieveVersion(self, package_id):
1338 """ 1339 Reimplemented from EntropySQLRepository. 1340 We must use the in-memory cache to do some memoization. 1341 """ 1342 if self.directed() or self.cache_policy_none(): 1343 return super(EntropySQLiteRepository, self).retrieveVersion( 1344 package_id) 1345 1346 cached = self._getLiveCache("retrieveVersion") 1347 if cached is None: 1348 cur = self._cursor().execute(""" 1349 SELECT idpackage, version FROM baseinfo 1350 """) 1351 cached = dict(cur) 1352 self._setLiveCache("retrieveVersion", cached) 1353 1354 # avoid python3.x memleak 1355 obj = cached.get(package_id) 1356 del cached 1357 return obj
1358
1359 - def retrieveRevision(self, package_id):
1360 """ 1361 Reimplemented from EntropySQLRepository. 1362 We must use the in-memory cache to do some memoization. 1363 """ 1364 if self.directed() or self.cache_policy_none(): 1365 return super(EntropySQLiteRepository, self).retrieveRevision( 1366 package_id) 1367 1368 cached = self._getLiveCache("retrieveRevision") 1369 if cached is None: 1370 cur = self._cursor().execute(""" 1371 SELECT idpackage, revision FROM baseinfo 1372 """) 1373 cached = dict(cur) 1374 self._setLiveCache("retrieveRevision", cached) 1375 1376 # avoid python3.x memleak 1377 obj = cached.get(package_id) 1378 del cached 1379 return obj
1380
    def retrieveUseflags(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must use the in-memory cache to do some memoization.

        @param package_id: package identifier
        @type package_id: int
        @return: USE flag names for package_id (empty if unknown)
        @rtype: frozenset
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).retrieveUseflags(
                package_id)

        cached = self._getLiveCache("retrieveUseflags")
        if cached is None:
            # cache miss: group all flags by package in one pass
            cur = self._cursor().execute("""
            SELECT useflags.idpackage, useflagsreference.flagname
            FROM useflags, useflagsreference
            WHERE useflags.idflag = useflagsreference.idflag
            """)
            cached = {}
            for pkg_id, flag in cur:
                obj = cached.setdefault(pkg_id, set())
                obj.add(flag)
            self._setLiveCache("retrieveUseflags", cached)

        # copy into a frozenset so callers cannot mutate the cache;
        # avoid python3.x memleak
        obj = frozenset(cached.get(package_id, frozenset()))
        del cached
        return obj
1407
    def retrieveDependencies(self, package_id, extended = False,
                             deptype = None, exclude_deptypes = None,
                             resolve_conditional_deps = True):
        """
        Reimplemented from EntropyRepositoryBase.
        We must use the in-memory cache to do some memoization.

        @param package_id: package identifier
        @type package_id: int
        @keyword extended: if True, also return the dependency type with
            each dependency string
        @keyword deptype: only return dependencies of this type
        @keyword exclude_deptypes: skip dependencies of these types
        @keyword resolve_conditional_deps: expand conditional dependency
            expressions through entropy.dep.expand_dependencies
        @return: frozenset of dependency strings, or tuple of
            (dependency, type) pairs when extended is True
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).retrieveDependencies(
                package_id, extended = extended, deptype = deptype,
                exclude_deptypes = exclude_deptypes,
                resolve_conditional_deps = resolve_conditional_deps)

        cached = self._getLiveCache("retrieveDependencies")
        if cached is None:
            # cache miss: group all (dependency, type) pairs by package
            cur = self._cursor().execute("""
            SELECT dependencies.idpackage,
                dependenciesreference.dependency,
                dependencies.type
            FROM dependencies, dependenciesreference
            WHERE dependencies.iddependency = dependenciesreference.iddependency
            """)

            cached = {}
            for pkg_id, dependency, dependency_type in cur:
                obj = cached.setdefault(pkg_id, collections.deque())
                obj.append((dependency, dependency_type))
            self._setLiveCache("retrieveDependencies", cached)

        data = cached.get(package_id, collections.deque())
        # deptype and exclude_deptypes are mutually exclusive filters;
        # deptype wins when both are given
        if deptype is not None:
            data = iter([x for x in data if x[1] == deptype])
        elif exclude_deptypes is not None:
            excl_set = frozenset(exclude_deptypes)
            data = iter([x for x in data if x[1] not in excl_set])

        iter_obj = tuple
        if extended:
            data = iter(data)
        else:
            # plain mode: drop the type and deduplicate
            iter_obj = frozenset
            data = iter((x for x, _x in data))

        # avoid python3.x memleak
        del cached

        if resolve_conditional_deps:
            return iter_obj(entropy.dep.expand_dependencies(
                data, [self]))
        return iter_obj(data)
1458
1459 - def retrieveDesktopMime(self, package_id):
1460 """ 1461 Reimplemented from EntropySQLRepository. 1462 We must handle backward compatibility. 1463 """ 1464 try: 1465 return super(EntropySQLiteRepository, 1466 self).retrieveDesktopMime(package_id) 1467 except OperationalError: 1468 if self._doesTableExist("packagedesktopmime"): 1469 raise 1470 return []
1471
1472 - def retrieveProvidedMime(self, package_id):
1473 """ 1474 Reimplemented from EntropySQLRepository. 1475 We must handle backward compatibility. 1476 """ 1477 try: 1478 return super(EntropySQLiteRepository, 1479 self).retrieveProvidedMime(package_id) 1480 except OperationalError: 1481 if self._doesTableExist("provided_mime"): 1482 raise 1483 return frozenset()
1484
1485 - def retrieveContentSafety(self, package_id):
1486 """ 1487 Reimplemented from EntropySQLRepository. 1488 We must handle backward compatibility. 1489 """ 1490 try: 1491 return super(EntropySQLiteRepository, 1492 self).retrieveContentSafety(package_id) 1493 except OperationalError: 1494 # TODO: remove after 2013? 1495 if self._doesTableExist('contentsafety'): 1496 raise 1497 return {}
1498
1499 - def retrieveContentSafetyIter(self, package_id):
1500 """ 1501 Reimplemented from EntropySQLRepository. 1502 We must handle backward compatibility. 1503 """ 1504 try: 1505 return super(EntropySQLiteRepository, 1506 self).retrieveContentSafetyIter(package_id) 1507 except OperationalError: 1508 # TODO: remove after 2013? 1509 if self._doesTableExist('contentsafety'): 1510 raise 1511 return iter([])
1512
    def retrieveChangelog(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.

        @param package_id: package identifier
        @type package_id: int
        @return: the changelog text as unicode, or None when the
            package has no changelog entry
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository,
                         self).retrieveChangelog(package_id)

        # old schema: changelogs are keyed by (category, name)
        cur = self._cursor().execute("""
        SELECT packagechangelogs.changelog
        FROM packagechangelogs, baseinfo, categories
        WHERE baseinfo.idpackage = (?) AND
        baseinfo.idcategory = categories.idcategory AND
        packagechangelogs.name = baseinfo.name AND
        packagechangelogs.category = categories.category
        LIMIT 1
        """, (package_id,))
        changelog = cur.fetchone()
        if changelog:
            changelog = changelog[0]
            try:
                return const_convert_to_unicode(changelog)
            except UnicodeDecodeError:
                # stored blob is not in the default encoding; retry utf-8
                return const_convert_to_unicode(
                    changelog, enctype = 'utf-8')
1539
1540 - def retrieveSlot(self, package_id):
1541 """ 1542 Reimplemented from EntropySQLRepository. 1543 We must use the in-memory cache to do some memoization. 1544 """ 1545 if self.directed() or self.cache_policy_none(): 1546 return super(EntropySQLiteRepository, self).retrieveSlot( 1547 package_id) 1548 1549 cached = self._getLiveCache("retrieveSlot") 1550 if cached is None: 1551 cur = self._cursor().execute(""" 1552 SELECT idpackage, slot FROM baseinfo 1553 """) 1554 cached = dict(cur) 1555 self._setLiveCache("retrieveSlot", cached) 1556 1557 # avoid python3.x memleak 1558 obj = cached.get(package_id) 1559 del cached 1560 return obj
1561
1562 - def retrieveTag(self, package_id):
1563 """ 1564 Reimplemented from EntropySQLRepository. 1565 We must use the in-memory cache to do some memoization. 1566 """ 1567 if self.directed() or self.cache_policy_none(): 1568 return super(EntropySQLiteRepository, self).retrieveTag( 1569 package_id) 1570 1571 cached = self._getLiveCache("retrieveTag") 1572 # gain 2% speed on atomMatch() 1573 if cached is None: 1574 cur = self._cursor().execute(""" 1575 SELECT idpackage, versiontag FROM baseinfo 1576 """) 1577 cached = dict(cur) 1578 self._setLiveCache("retrieveTag", cached) 1579 1580 # avoid python3.x memleak 1581 obj = cached.get(package_id) 1582 del cached 1583 return obj
1584
    def retrieveCategory(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        We must use the in-memory cache to do some memoization.

        @param package_id: package identifier
        @type package_id: int
        @return: category name for package_id, or None if unknown
        @rtype: string or None
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).retrieveCategory(
                package_id)

        cached = self._getLiveCache("retrieveCategory")
        # this gives 14% speed boost in atomMatch()
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT idpackage, category FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT baseinfo.idpackage, categories.category
                FROM baseinfo,categories WHERE
                baseinfo.idcategory = categories.idcategory
                """)
            cached = dict(cur)
            self._setLiveCache("retrieveCategory", cached)

        # avoid python3.x memleak
        obj = cached.get(package_id)
        del cached
        return obj
1615
    def retrieveCompileFlags(self, package_id):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.

        @param package_id: package identifier
        @type package_id: int
        @return: (chost, cflags, cxxflags), falling back to
            ("N/A", "N/A", "N/A") when no flags row exists
        @rtype: tuple
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository,
                         self).retrieveCompileFlags(package_id)

        # old schema: flags live in a separate table keyed by idflags
        cur = self._cursor().execute("""
        SELECT chost,cflags,cxxflags FROM flags,extrainfo
        WHERE extrainfo.idpackage = (?) AND
        extrainfo.idflags = flags.idflags
        LIMIT 1""", (package_id,))
        flags = cur.fetchone()
        if not flags:
            flags = ("N/A", "N/A", "N/A")
        return flags
1634
1635 - def searchLicense(self, keyword, just_id = False):
1636 """ 1637 Reimplemented from EntropySQLRepository. 1638 We must handle _baseinfo_extrainfo_2010. 1639 """ 1640 if self._isBaseinfoExtrainfo2010(): 1641 return super(EntropySQLiteRepository, 1642 self).searchLicense(keyword, just_id = just_id) 1643 1644 # backward compatibility 1645 if not entropy.tools.is_valid_string(keyword): 1646 return frozenset() 1647 1648 license_query = """baseinfo, licenses 1649 WHERE LOWER(licenses.license) LIKE (?) AND 1650 licenses.idlicense = baseinfo.idlicense""" 1651 if just_id: 1652 cur = self._cursor().execute(""" 1653 SELECT baseinfo.idpackage FROM %s 1654 """ % (license_query,), ("%"+keyword+"%".lower(),)) 1655 return self._cur2frozenset(cur) 1656 else: 1657 cur = self._cursor().execute(""" 1658 SELECT baseinfo.atom, baseinfo.idpackage FROM %s 1659 """ % (license_query,), ("%"+keyword+"%".lower(),)) 1660 return frozenset(cur)
1661
    def searchKeySlot(self, key, slot):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        We must use the in-memory cache to do some memoization.

        @param key: package key in "category/name" form
        @type key: string
        @param slot: package slot
        @type slot: string
        @return: package identifiers matching (key, slot)
        @rtype: frozenset
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).searchKeySlot(
                key, slot)

        cached = self._getLiveCache("searchKeySlot")
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT category, name, slot, idpackage FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT categories.category, baseinfo.name, baseinfo.slot,
                baseinfo.idpackage
                FROM baseinfo, categories
                WHERE baseinfo.idcategory = categories.idcategory
                """)
            # index package ids by (category, name, slot)
            cached = {}
            for d_cat, d_name, d_slot, pkg_id in cur:
                obj = cached.setdefault(
                    (d_cat, d_name, d_slot), set())
                obj.add(pkg_id)
            self._setLiveCache("searchKeySlot", cached)

        cat, name = key.split("/", 1)
        # copy out of the cache; avoid python3.x memleak
        obj = frozenset(cached.get((cat, name, slot), frozenset()))
        del cached
        return obj
1697
    def searchKeySlotTag(self, key, slot, tag):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        We must use the in-memory cache to do some memoization.

        @param key: package key in "category/name" form
        @type key: string
        @param slot: package slot
        @type slot: string
        @param tag: package version tag
        @type tag: string
        @return: package identifiers matching (key, slot, tag)
        @rtype: frozenset
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).searchKeySlotTag(
                key, slot, tag)

        cached = self._getLiveCache("searchKeySlotTag")
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT category, name, slot, versiontag, idpackage
                FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT categories.category, baseinfo.name, baseinfo.slot,
                baseinfo.versiontag, baseinfo.idpackage
                FROM baseinfo, categories
                WHERE baseinfo.idcategory = categories.idcategory
                """)
            # index package ids by (category, name, slot, tag)
            cached = {}
            for d_cat, d_name, d_slot, d_tag, pkg_id in cur.fetchall():
                obj = cached.setdefault(
                    (d_cat, d_name, d_slot, d_tag), set())
                obj.add(pkg_id)
            self._setLiveCache("searchKeySlotTag", cached)

        cat, name = key.split("/", 1)
        # copy out of the cache; avoid python3.x memleak
        obj = frozenset(cached.get((cat, name, slot, tag), frozenset()))
        del cached
        return obj
1734
1735 - def searchSets(self, keyword):
1736 """ 1737 Reimplemented from EntropySQLRepository. 1738 We must handle backward compatibility. 1739 """ 1740 try: 1741 return super(EntropySQLiteRepository, self).searchSets(keyword) 1742 except OperationalError: 1743 # TODO: remove this after 2012? 1744 if self._doesTableExist("packagesets"): 1745 raise 1746 return frozenset()
1747
1748 - def searchProvidedMime(self, mimetype):
1749 """ 1750 Reimplemented from EntropySQLRepository. 1751 We must handle backward compatibility. 1752 """ 1753 try: 1754 return super(EntropySQLiteRepository, 1755 self).searchProvidedMime(mimetype) 1756 except OperationalError: 1757 # TODO: remove this after 2012? 1758 if self._doesTableExist("provided_mime"): 1759 raise 1760 return tuple()
1761
    def searchCategory(self, keyword, like = False, just_id = True):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.

        @param keyword: category name to search
        @type keyword: string
        @keyword like: if True, use SQL LIKE matching instead of equality
        @keyword just_id: if True, return only package identifiers
        @return: frozenset of package_ids, or of (atom, package_id)
            tuples when just_id is False
        @rtype: frozenset
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository,
                         self).searchCategory(
                keyword, like = like, just_id = just_id)

        # backward compatibility
        # the operator is trusted code, only the keyword is a parameter
        like_string = "= (?)"
        if like:
            like_string = "LIKE (?)"

        if just_id:
            cur = self._cursor().execute("""
            SELECT baseinfo.idpackage FROM baseinfo, categories
            WHERE categories.category %s AND
            baseinfo.idcategory = categories.idcategory
            """ % (like_string,), (keyword,))
        else:
            cur = self._cursor().execute("""
            SELECT baseinfo.atom,baseinfo.idpackage
            FROM baseinfo, categories
            WHERE categories.category %s AND
            baseinfo.idcategory = categories.idcategory
            """ % (like_string,), (keyword,))

        if just_id:
            return self._cur2frozenset(cur)
        return frozenset(cur)
1794
    def searchNameCategory(self, name, category, just_id = False):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        We must use the in-memory cache to do some memoization.

        @param name: package name
        @type name: string
        @param category: package category
        @type category: string
        @keyword just_id: if True, return only package identifiers
        @return: frozenset of package_ids, or of (atom, package_id)
            tuples when just_id is False
        @rtype: frozenset
        """
        if self.directed() or self.cache_policy_none():
            # live cache disabled: use the plain SQL implementation
            return super(EntropySQLiteRepository, self).searchNameCategory(
                name, category, just_id = just_id)

        cached = self._getLiveCache("searchNameCategory")
        # this gives 30% speed boost on atomMatch()
        if cached is None:
            if self._isBaseinfoExtrainfo2010():
                # new schema: category is a column of baseinfo
                cur = self._cursor().execute("""
                SELECT name, category, atom, idpackage FROM baseinfo
                """)
            else:
                # old schema: join against the categories table
                cur = self._cursor().execute("""
                SELECT baseinfo.name,categories.category,
                baseinfo.atom, baseinfo.idpackage FROM baseinfo,categories
                WHERE baseinfo.idcategory = categories.idcategory
                """)
            # index (atom, package_id) pairs by (name, category)
            cached = {}
            for nam, cat, atom, pkg_id in cur:
                obj = cached.setdefault((nam, cat), set())
                obj.add((atom, pkg_id))
            self._setLiveCache("searchNameCategory", cached)

        data = frozenset(cached.get((name, category), frozenset()))
        # This avoids memory leaks with python 3.x
        del cached

        if just_id:
            return frozenset((y for x, y in data))
        return data
1831
    def listPackageIdsInCategory(self, category, order_by = None):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.

        @param category: category name
        @type category: string
        @keyword order_by: column to order results by; must be one of
            the whitelisted names below
        @return: package identifiers belonging to category
        @rtype: frozenset
        @raise AttributeError: if order_by is not a valid column name
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository,
                         self).listPackageIdsInCategory(
                category, order_by = order_by)

        # backward compatibility
        # order_by is validated against a whitelist before being
        # interpolated into the SQL text
        order_by_string = ''
        if order_by is not None:
            valid_order_by = ("atom", "idpackage", "package_id", "branch",
                "name", "version", "versiontag", "revision", "slot")
            if order_by not in valid_order_by:
                raise AttributeError("invalid order_by argument")
            if order_by == "package_id":
                order_by = "idpackage"
            order_by_string = ' order by %s' % (order_by,)

        cur = self._cursor().execute("""
        SELECT idpackage FROM baseinfo, categories WHERE
        categories.category = (?) AND
        baseinfo.idcategory = categories.idcategory
        """ + order_by_string, (category,))
        return self._cur2frozenset(cur)
1859
1860 - def listAllExtraDownloads(self, do_sort = True):
1861 """ 1862 Reimplemented from EntropySQLRepository. 1863 We must handle backward compatibility. 1864 """ 1865 try: 1866 return super(EntropySQLiteRepository, 1867 self).listAllExtraDownloads( 1868 do_sort = do_sort) 1869 except OperationalError: 1870 if self._doesTableExist("packagedownloads"): 1871 raise 1872 return tuple()
1873
    def listAllCategories(self, order_by = None):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.

        @keyword order_by: column to order results by; only "category"
            is accepted
        @return: all category names in the repository
        @rtype: frozenset
        @raise AttributeError: if order_by is not a valid column name
        """
        if self._isBaseinfoExtrainfo2010():
            # new schema: superclass query is sufficient
            return super(EntropySQLiteRepository,
                         self).listAllCategories(
                order_by = order_by)

        # backward compatibility
        # order_by is validated against a whitelist before being
        # interpolated into the SQL text
        order_by_string = ''
        if order_by is not None:
            valid_order_by = ("category",)
            if order_by not in valid_order_by:
                raise AttributeError("invalid order_by argument")
            order_by_string = 'ORDER BY %s' % (order_by,)

        cur = self._cursor().execute(
            "SELECT category FROM categories %s" % (order_by_string,))
        return self._cur2frozenset(cur)
1895
    def _setupInitialSettings(self):
        """
        Setup initial repository settings.

        Writes the "arch", "on_delete_cascade" and
        "_baseinfo_extrainfo_2010" keys into the settings table,
        commits, and drops the settings cache so the new values are
        re-read on next access.
        """
        # values are trusted constants, so plain %-interpolation into
        # the SQL script is acceptable here
        query = """
        INSERT OR REPLACE INTO settings VALUES ("arch", "%s");
        INSERT OR REPLACE INTO settings VALUES ("on_delete_cascade", "%s");
        INSERT OR REPLACE INTO settings VALUES ("_baseinfo_extrainfo_2010",
        "%s");
        """ % (etpConst['currentarch'], "1", "1")
        self._cursor().executescript(query)
        self.commit()
        self._settings_cache.clear()
1909
1910 - def _databaseSchemaUpdates(self):
1911 """ 1912 Do not forget to bump _SCHEMA_REVISION whenever you add more tables 1913 """ 1914 1915 def must_run(): 1916 try: 1917 current_schema_rev = int(self.getSetting("schema_revision")) 1918 except (KeyError, ValueError): 1919 current_schema_rev = -1 1920 1921 if current_schema_rev == EntropySQLiteRepository._SCHEMA_REVISION \ 1922 and not os.getenv("ETP_REPO_SCHEMA_UPDATE"): 1923 return False 1924 return True
1925 1926 if not must_run(): 1927 return 1928 1929 try: 1930 with self.exclusive(): 1931 if not must_run(): 1932 return 1933 self._databaseSchemaUpdatesUnlocked() 1934 except LockAcquireError as err: 1935 const_debug_write( 1936 __name__, 1937 "_maybeDatabaseSchemaUpdates error: %s" % (err,)) 1938
    def _databaseSchemaUpdatesUnlocked(self):
        """
        Internal version of _databaseSchemaUpdates. This method assumes that
        the Repository lock is acquired in exclusive mode.

        Creates any missing tables/columns in dependency order, runs
        data migrations, enables foreign key support, and finally
        persists the new schema revision (read/write mode only).
        """
        # temporarily lift read-only so the DDL below can run;
        # restored before the final commit
        old_readonly = self._readonly
        self._readonly = False

        if not self._doesTableExist("packagedesktopmime"):
            self._createPackageDesktopMimeTable()
        if not self._doesTableExist("provided_mime"):
            self._createProvidedMimeTable()

        if not self._doesTableExist("licenses_accepted"):
            self._createLicensesAcceptedTable()

        if not self._doesColumnInTableExist("installedtable", "source"):
            self._createInstalledTableSource()

        if not self._doesColumnInTableExist("provide", "is_default"):
            self._createProvideDefault()

        if not self._doesTableExist("packagesets"):
            self._createPackagesetsTable()

        if not self._doesTableExist("packagechangelogs"):
            self._createPackagechangelogsTable()

        if not self._doesTableExist("automergefiles"):
            self._createAutomergefilesTable()

        if not self._doesTableExist("packagesignatures"):
            self._createPackagesignaturesTable()
        elif not self._doesColumnInTableExist("packagesignatures", "gpg"):
            self._createPackagesignaturesGpgColumn()

        if not self._doesTableExist("packagespmphases"):
            self._createPackagespmphases()

        if not self._doesTableExist("packagespmrepository"):
            self._createPackagespmrepository()

        if not self._doesTableExist("entropy_branch_migration"):
            self._createEntropyBranchMigrationTable()

        if not self._doesTableExist("settings"):
            self._createSettingsTable()

        # added on Aug, 2010
        if not self._doesTableExist("contentsafety"):
            self._createContentSafetyTable()
        if not self._doesTableExist('provided_libs'):
            self._createProvidedLibs()

        # added on Aug. 2011
        if not self._doesTableExist("packagedownloads"):
            self._createPackageDownloadsTable()

        # added on Nov. 2013
        if not self._doesTableExist("preserved_libs"):
            self._createPreservedLibsTable()
        if not self._doesColumnInTableExist("preserved_libs", "atom"):
            self._createPreservedLibsAtomColumn()

        # added on Sept. 2014, keep forever? ;-)
        self._migrateNeededLibs()

        # added on Sept. 2010, keep forever? ;-)
        self._migrateBaseinfoExtrainfo()

        self._foreignKeySupport()

        self._readonly = old_readonly
        self._connection().commit()

        if not old_readonly:
            # it seems that it's causing locking issues
            # so, just execute it when in read/write mode
            self._setSetting("schema_revision",
                EntropySQLiteRepository._SCHEMA_REVISION)
            self._connection().commit()
2020
2021 - def integrity_check(self):
2022 """ 2023 Reimplemented from EntropyRepositoryBase. 2024 """ 2025 cur = self._cursor().execute("PRAGMA quick_check(1)") 2026 try: 2027 check_data = cur.fetchone()[0] 2028 if check_data != "ok": 2029 raise ValueError() 2030 except (IndexError, ValueError, TypeError,): 2031 raise SystemDatabaseError( 2032 "sqlite3 reports database being corrupted")
    @staticmethod
    def importRepository(dumpfile, db, data = None):
        """
        Reimplemented from EntropyRepositoryBase.
        @todo: remove /usr/bin/sqlite3 dependency

        Feeds the SQL dump at dumpfile into a temporary database file
        through the sqlite3 command-line tool, then atomically renames
        it over db on success.

        @return: the sqlite3 process exit status (0 on success), or 1
            if the sqlite3 binary could not be spawned
        @rtype: int
        """
        dbfile = os.path.realpath(db)
        # import into a sibling temp file so a failed import never
        # clobbers the existing database
        tmp_dbfile = dbfile + ".import_repository"
        dumpfile = os.path.realpath(dumpfile)
        if not entropy.tools.is_valid_path_string(dbfile):
            raise AttributeError("dbfile value is invalid")
        if not entropy.tools.is_valid_path_string(dumpfile):
            raise AttributeError("dumpfile value is invalid")
        with open(dumpfile, "rb") as in_f:
            try:
                proc = subprocess.Popen(("/usr/bin/sqlite3", tmp_dbfile,),
                    bufsize = -1, stdin = in_f)
            except OSError:
                # ouch ! wtf!
                return 1
            rc = proc.wait()
            if rc == 0:
                # success: atomically replace the target database
                os.rename(tmp_dbfile, dbfile)
            return rc
2058
    def exportRepository(self, dumpfile):
        """
        Reimplemented from EntropyRepositoryBase.

        Writes a complete SQL dump (schema + INSERT statements for every
        table, then non-table objects such as indexes/triggers) to the
        given binary file object, wrapped in a single transaction.

        @param dumpfile: writable binary file object receiving the dump
        """
        exclude_tables = []
        gentle_with_tables = True
        toraw = const_convert_to_rawstring

        dumpfile.write(toraw("BEGIN TRANSACTION;\n"))
        cur = self._cursor().execute("""
        SELECT name, type, sql FROM sqlite_master
        WHERE sql NOT NULL AND type=='table'
        """)
        for name, x, sql in cur.fetchall():

            self.output(
                red("%s " % (
                    _("Exporting database table"),
                ) ) + "["+blue(str(name))+"]",
                importance = 0,
                level = "info",
                back = True,
                header = "   "
            )
            # sqlite internal tables must not be dumped
            if name.startswith("sqlite_"):
                continue

            t_cmd = "CREATE TABLE"
            if sql.startswith(t_cmd) and gentle_with_tables:
                # make the dump re-importable over an existing database
                sql = "CREATE TABLE IF NOT EXISTS"+sql[len(t_cmd):]
            dumpfile.write(toraw("%s;\n" % sql))

            if name in exclude_tables:
                continue

            # build an INSERT statement per row using sqlite's quote()
            cur2 = self._cursor().execute("PRAGMA table_info('%s')" % name)
            cols = [r[1] for r in cur2.fetchall()]
            q = "SELECT 'INSERT INTO \"%(tbl_name)s\" VALUES("
            q += ", ".join(["'||quote(" + x + ")||'" for x in cols])
            q += ")' FROM '%(tbl_name)s'"
            self._connection().unicode()
            cur3 = self._cursor().execute(q % {'tbl_name': name})
            for row in cur3:
                dumpfile.write(toraw("%s;\n" % (row[0],)))

        # now dump indexes, triggers, views etc.
        cur4 = self._cursor().execute("""
        SELECT name, type, sql FROM sqlite_master
        WHERE sql NOT NULL AND type!='table' AND type!='meta'
        """)
        for name, x, sql in cur4.fetchall():
            dumpfile.write(toraw("%s;\n" % sql))

        dumpfile.write(toraw("COMMIT;\n"))
        if hasattr(dumpfile, 'flush'):
            dumpfile.flush()

        self.output(
            red(_("Database Export complete.")),
            importance = 0,
            level = "info",
            header = "   "
        )
        # remember to close the file
2123 - def _listAllTables(self):
2124 """ 2125 List all available tables in this repository database. 2126 2127 @return: available tables 2128 @rtype: list 2129 """ 2130 cur = self._cursor().execute(""" 2131 SELECT name FROM SQLITE_MASTER 2132 WHERE type = "table" AND NOT name LIKE "sqlite_%" 2133 """) 2134 return self._cur2tuple(cur)
2135
2136 - def mtime(self):
2137 """ 2138 Reimplemented from EntropyRepositoryBase. 2139 """ 2140 if self._db is None: 2141 return 0.0 2142 if self._is_memory(): 2143 return 0.0 2144 return os.path.getmtime(self._db)
2145
2146 - def checksum(self, do_order = False, strict = True, 2147 include_signatures = False, include_dependencies = False):
2148 """ 2149 Reimplemented from EntropySQLRepository. 2150 We have to handle _baseinfo_extrainfo_2010. 2151 We must use the in-memory cache to do some memoization. 2152 """ 2153 _baseinfo_extrainfo_2010 = self._isBaseinfoExtrainfo2010() 2154 if _baseinfo_extrainfo_2010: 2155 return super(EntropySQLiteRepository, 2156 self).checksum( 2157 do_order = do_order, 2158 strict = strict, 2159 include_signatures = include_signatures) 2160 2161 # backward compatibility 2162 # !!! keep aligned !!! 2163 cache_key = "checksum_%s_%s_True_%s" % ( 2164 do_order, strict, include_signatures) 2165 cached = self._getLiveCache(cache_key) 2166 if cached is not None: 2167 return cached 2168 # avoid memleak with python3.x 2169 del cached 2170 2171 package_id_order = "" 2172 category_order = "" 2173 license_order = "" 2174 flags_order = "" 2175 depenenciesref_order = "" 2176 dependencies_order = "" 2177 if do_order: 2178 package_id_order = "order by idpackage" 2179 category_order = "order by category" 2180 license_order = "order by license" 2181 flags_order = "order by chost" 2182 dependenciesref_order = "order by iddependency" 2183 dependencies_order = "order by idpackage" 2184 2185 def do_update_hash(m, cursor): 2186 # this could slow things down a lot, so be careful 2187 # NOTE: this function must guarantee platform, architecture, 2188 # interpreter independent results. Cannot use hash() then. 2189 # Even repr() might be risky! But on the other hand, the 2190 # conversion to string cannot take forever. 2191 if const_is_python3(): 2192 for record in cursor: 2193 m.update(repr(record).encode("utf-8")) 2194 else: 2195 for record in cursor: 2196 m.update(repr(record))
2197 2198 m = hashlib.sha1() 2199 2200 if not self._doesTableExist("baseinfo"): 2201 m.update(const_convert_to_rawstring("~empty~")) 2202 result = m.hexdigest() 2203 self._setLiveCache(cache_key, result) 2204 return result 2205 2206 if strict: 2207 cur = self._cursor().execute(""" 2208 SELECT * FROM baseinfo 2209 %s""" % (package_id_order,)) 2210 else: 2211 cur = self._cursor().execute(""" 2212 SELECT idpackage, atom, name, version, versiontag, revision, 2213 branch, slot, etpapi, trigger FROM baseinfo 2214 %s""" % (package_id_order,)) 2215 2216 do_update_hash(m, cur) 2217 2218 if strict: 2219 cur = self._cursor().execute(""" 2220 SELECT * FROM extrainfo %s 2221 """ % (package_id_order,)) 2222 else: 2223 cur = self._cursor().execute(""" 2224 SELECT idpackage, description, homepage, download, size, 2225 digest, datecreation FROM extrainfo %s 2226 """ % (package_id_order,)) 2227 2228 do_update_hash(m, cur) 2229 2230 cur = self._cursor().execute(""" 2231 SELECT category FROM categories %s 2232 """ % (category_order,)) 2233 do_update_hash(m, cur) 2234 2235 d_hash = "0" 2236 e_hash = "0" 2237 if strict: 2238 cur = self._cursor().execute(""" 2239 SELECT * FROM licenses %s""" % (license_order,)) 2240 do_update_hash(m, cur) 2241 2242 cur = self._cursor().execute('select * from flags %s' % ( 2243 flags_order,)) 2244 do_update_hash(m, cur) 2245 2246 if include_signatures: 2247 cur = self._cursor().execute(""" 2248 SELECT idpackage, sha1, gpg FROM 2249 packagesignatures %s""" % (package_id_order,)) 2250 2251 do_update_hash(m, cur) 2252 2253 if include_dependencies: 2254 cur = self._cursor().execute(""" 2255 SELECT * from dependenciesreference %s 2256 """ % (dependenciesref_order,)) 2257 do_update_hash(m, cur) 2258 2259 cur = self._cursor().execute(""" 2260 SELECT * from dependencies %s 2261 """ % (dependencies_order,)) 2262 do_update_hash(m, cur) 2263 2264 result = m.hexdigest() 2265 self._setLiveCache(cache_key, result) 2266 return result 2267
2268 - def storeInstalledPackage(self, package_id, repoid, source = 0):
2269 """ 2270 Reimplemented from EntropySQLRepository. 2271 """ 2272 super(EntropySQLiteRepository, self).storeInstalledPackage( 2273 package_id, repoid, source = source) 2274 self._clearLiveCache("getInstalledPackageRepository") 2275 self._clearLiveCache("getInstalledPackageSource")
2276
2277 - def getInstalledPackageRepository(self, package_id):
2278 """ 2279 Reimplemented from EntropySQLRepository. 2280 We must use the in-memory cache to do some memoization. 2281 """ 2282 if self.directed() or self.cache_policy_none(): 2283 return super(EntropySQLiteRepository, 2284 self).getInstalledPackageRepository( 2285 package_id) 2286 2287 cached = self._getLiveCache("getInstalledPackageRepository") 2288 if cached is None: 2289 cur = self._cursor().execute(""" 2290 SELECT idpackage, repositoryname FROM installedtable 2291 """) 2292 cached = dict(cur) 2293 self._setLiveCache("getInstalledPackageRepository", cached) 2294 2295 # avoid python3.x memleak 2296 obj = cached.get(package_id) 2297 del cached 2298 return obj
2299
2300 - def getInstalledPackageSource(self, package_id):
2301 """ 2302 Reimplemented from EntropySQLRepositoryBase. 2303 We must use the in-memory cache to do some memoization. 2304 """ 2305 if self.directed() or self.cache_policy_none(): 2306 return super(EntropySQLiteRepository, 2307 self).getInstalledPackageSource( 2308 package_id) 2309 2310 cached = self._getLiveCache("getInstalledPackageSource") 2311 if cached is None: 2312 try: 2313 # be optimistic, delay _doesColumnInTableExist as much as 2314 # possible 2315 cur = self._cursor().execute(""" 2316 SELECT idpackage, source FROM installedtable 2317 """) 2318 cached = dict(cur) 2319 except OperationalError as err: 2320 # TODO: drop this check in future, backward compatibility 2321 if self._doesColumnInTableExist( 2322 "installedtable", "source"): 2323 raise 2324 cached = {} 2325 self._setLiveCache("getInstalledPackageSource", cached) 2326 2327 # avoid python3.x memleak 2328 obj = cached.get(package_id) 2329 del cached 2330 return obj
2331
2332 - def dropInstalledPackageFromStore(self, package_id):
2333 """ 2334 Reimplemented from EntropySQLRepository. 2335 We must handle live cache. 2336 """ 2337 super(EntropySQLiteRepository, self).dropInstalledPackageFromStore( 2338 package_id) 2339 self._clearLiveCache("getInstalledPackageRepository") 2340 self._clearLiveCache("getInstalledPackageSource")
2341
2342 - def retrieveSpmMetadata(self, package_id):
2343 """ 2344 Reimplemented from EntropySQLRepository. 2345 We must handle backward compatibility. 2346 """ 2347 try: 2348 return super(EntropySQLiteRepository, 2349 self).retrieveSpmMetadata( 2350 package_id) 2351 except OperationalError: 2352 if self._doesTableExist("xpakdata"): 2353 raise 2354 buf = const_get_buffer() 2355 return buf("")
2356
2357 - def retrieveBranchMigration(self, to_branch):
2358 """ 2359 Reimplemented from EntropySQLRepository. 2360 We must handle backward compatibility. 2361 """ 2362 try: 2363 return super(EntropySQLiteRepository, 2364 self).retrieveBranchMigration( 2365 to_branch) 2366 except OperationalError: 2367 if self._doesTableExist('entropy_branch_migration'): 2368 raise 2369 return {}
2370
2371 - def dropContentSafety(self):
2372 """ 2373 Reimplemented from EntropySQLRepository. 2374 We must handle backward compatibility. 2375 """ 2376 try: 2377 return super(EntropySQLiteRepository, 2378 self).dropContentSafety() 2379 except OperationalError: 2380 if self._doesTableExist('contentsafety'): 2381 raise
2382 # table doesn't exist, ignore 2383
2384 - def dropAllIndexes(self):
2385 """ 2386 Reimplemented from EntropyRepositoryBase. 2387 """ 2388 cur = self._cursor().execute(""" 2389 SELECT name FROM SQLITE_MASTER WHERE type = "index" 2390 AND name NOT LIKE "sqlite_%" 2391 """) 2392 for index in self._cur2frozenset(cur): 2393 try: 2394 self._cursor().execute('DROP INDEX IF EXISTS %s' % (index,)) 2395 except OperationalError: 2396 continue
2397
2398 - def createAllIndexes(self):
2399 """ 2400 Reimplemented from EntropySQLRepository. 2401 We must handle _baseinfo_extrainfo_2010. 2402 """ 2403 super(EntropySQLiteRepository, self).createAllIndexes() 2404 if not self._isBaseinfoExtrainfo2010(): 2405 self.__createLicensesIndex() 2406 self.__createCategoriesIndex() 2407 self.__createCompileFlagsIndex()
2408
2409 - def __createCompileFlagsIndex(self):
2410 try: 2411 self._cursor().execute(""" 2412 CREATE INDEX IF NOT EXISTS flagsindex ON flags 2413 ( chost, cflags, cxxflags ) 2414 """) 2415 except OperationalError: 2416 pass
2417
2418 - def __createCategoriesIndex(self):
2419 self._cursor().execute(""" 2420 CREATE INDEX IF NOT EXISTS categoriesindex_category 2421 ON categories ( category ) 2422 """)
2423
2424 - def __createLicensesIndex(self):
2425 self._cursor().execute(""" 2426 CREATE INDEX IF NOT EXISTS licensesindex ON licenses ( license ) 2427 """)
2428
    def _createBaseinfoIndex(self):
        """
        Reimplemented from EntropySQLRepository.
        We must handle _baseinfo_extrainfo_2010.
        On the pre-2010 layout, create the legacy baseinfo indexes
        (which reference the idcategory/idlicense foreign key columns).
        """
        if self._isBaseinfoExtrainfo2010():
            return super(EntropySQLiteRepository,
                         self)._createBaseinfoIndex()

        # backward compatibility
        self._cursor().executescript("""
        CREATE INDEX IF NOT EXISTS baseindex_atom
            ON baseinfo ( atom );
        CREATE INDEX IF NOT EXISTS baseindex_branch_name
            ON baseinfo ( name, branch );
        CREATE INDEX IF NOT EXISTS baseindex_branch_name_idcategory
            ON baseinfo ( name, idcategory, branch );
        CREATE INDEX IF NOT EXISTS baseindex_idlicense
            ON baseinfo ( idlicense, idcategory );
        """)
2449
    def _migrateNeededLibs(self):
        """
        Migrate from needed and neededreference schema to the
        new needed_libs.

        The legacy needed/neededreference pair is joined into the new
        needed_libs table (lib_user_path, lib_user_soname and rpath are
        filled with empty strings since the old schema has no such
        data), then the old tables are emptied. No-op if needed_libs
        already exists.
        """
        if self._doesTableExist("needed_libs"):
            return

        self._cursor().executescript("""
        BEGIN TRANSACTION;
        DROP TABLE IF EXISTS needed_libs_temp;
        CREATE TABLE needed_libs_temp (
            idpackage INTEGER,
            lib_user_path VARCHAR,
            lib_user_soname VARCHAR,
            soname VARCHAR,
            elfclass INTEGER,
            rpath VARCHAR,
            FOREIGN KEY(idpackage)
                REFERENCES baseinfo(idpackage) ON DELETE CASCADE
        );
        INSERT INTO needed_libs_temp
            SELECT needed.idpackage, "", "", neededreference.library,
            needed.elfclass, "" FROM needed, neededreference
            WHERE needed.idneeded = neededreference.idneeded;

        ALTER TABLE needed_libs_temp RENAME TO needed_libs;
        DELETE FROM neededreference;
        DELETE FROM needed;
        COMMIT;
        """)
        # schema changed: invalidate table/column existence memoization
        self._clearLiveCache("_doesTableExist")
        self._clearLiveCache("_doesColumnInTableExist")
2483
2484 - def _isBaseinfoExtrainfo2010(self):
2485 """ 2486 Return is _baseinfo_extrainfo_2010 setting is 2487 found via getSetting() 2488 """ 2489 try: 2490 self.getSetting("_baseinfo_extrainfo_2010") 2491 # extra check to avoid issues with settings table creation 2492 # before the actual schema update, check if baseinfo has the 2493 # category column. 2494 return self._doesColumnInTableExist("baseinfo", "category") 2495 except KeyError: 2496 return False
2497
    def _migrateBaseinfoExtrainfo(self):
        """
        Support for optimized baseinfo table, migration function.

        Folds the legacy licenses/categories tables into baseinfo and
        the flags table into extrainfo, producing the 2010 layout.
        No-op if already migrated or if any of the legacy tables is
        missing. Foreign keys are disabled for the duration of the
        rewrite and re-enabled afterwards.
        """
        if self._isBaseinfoExtrainfo2010():
            return
        if not self._doesTableExist("baseinfo"):
            return
        if not self._doesTableExist("extrainfo"):
            return
        if not self._doesTableExist("licenses"):
            return
        if not self._doesTableExist("categories"):
            return
        if not self._doesTableExist("flags"):
            return

        mytxt = "%s: [%s] %s" % (
            bold(_("ATTENTION")),
            purple(self.name),
            red(_("updating repository metadata layout, please wait!")),
        )
        self.output(
            mytxt,
            importance = 1,
            level = "warning")

        # indexes reference the old columns, drop them first
        self.dropAllIndexes()
        self._cursor().execute("pragma foreign_keys = OFF").fetchall()
        self._cursor().executescript("""
        BEGIN TRANSACTION;

        DROP TABLE IF EXISTS baseinfo_new_temp;
        CREATE TABLE baseinfo_new_temp (
            idpackage INTEGER PRIMARY KEY AUTOINCREMENT,
            atom VARCHAR,
            category VARCHAR,
            name VARCHAR,
            version VARCHAR,
            versiontag VARCHAR,
            revision INTEGER,
            branch VARCHAR,
            slot VARCHAR,
            license VARCHAR,
            etpapi INTEGER,
            trigger INTEGER
        );
        INSERT INTO baseinfo_new_temp
            SELECT idpackage, atom, category, name, version, versiontag,
            revision, branch, slot, license, etpapi, trigger
            FROM baseinfo, licenses, categories WHERE
            categories.idcategory = baseinfo.idcategory AND
            licenses.idlicense = baseinfo.idlicense;
        DROP TABLE baseinfo;
        ALTER TABLE baseinfo_new_temp RENAME TO baseinfo;
        DROP TABLE categories;
        DROP TABLE licenses;

        DROP TABLE IF EXISTS extrainfo_new_temp;
        CREATE TABLE extrainfo_new_temp (
            idpackage INTEGER PRIMARY KEY,
            description VARCHAR,
            homepage VARCHAR,
            download VARCHAR,
            size VARCHAR,
            chost VARCHAR,
            cflags VARCHAR,
            cxxflags VARCHAR,
            digest VARCHAR,
            datecreation VARCHAR,
            FOREIGN KEY(idpackage)
                REFERENCES baseinfo(idpackage) ON DELETE CASCADE
        );
        INSERT INTO extrainfo_new_temp
            SELECT idpackage, description, homepage, download, size,
            flags.chost, flags.cflags, flags.cxxflags,
            digest, datecreation
            FROM extrainfo, flags WHERE flags.idflags = extrainfo.idflags;
        DROP TABLE extrainfo;
        ALTER TABLE extrainfo_new_temp RENAME TO extrainfo;
        DROP TABLE flags;

        COMMIT;
        """)
        self._cursor().execute("pragma foreign_keys = ON").fetchall()

        self._clearLiveCache("_doesColumnInTableExist")
        # record the migration so _isBaseinfoExtrainfo2010() turns True
        self._setSetting("_baseinfo_extrainfo_2010", "1")
        self._connection().commit()
2587
    def _foreignKeySupport(self):
        """
        Ensure every per-package table carries a
        FOREIGN KEY(idpackage) ... ON DELETE CASCADE clause, rebuilding
        tables that predate foreign key support. Records completion via
        the "on_delete_cascade" setting.
        """

        # entropy.qa uses this name, must skip migration
        if self.name in ("qa_testing", "mem_repo"):
            return

        tables = ("extrainfo", "dependencies" , "provide",
            "conflicts", "configprotect", "configprotectmask", "sources",
            "useflags", "keywords", "content", "counters", "sizes",
            "needed", "needed_libs", "triggers", "systempackages", "injected",
            "installedtable", "automergefiles", "packagesignatures",
            "packagespmphases", "provided_libs")

        done_something = False
        foreign_keys_supported = False
        for table in tables:
            if not self._doesTableExist(table):
                continue

            cur = self._cursor().execute("""
            PRAGMA foreign_key_list(%s)
            """ % (table,))
            foreign_keys = cur.fetchone()

            # print table, "foreign keys", foreign_keys
            if foreign_keys is not None:
                # seems so, more or less
                foreign_keys_supported = True
                continue

            if not done_something:
                # warn the user only once, before the first rebuild
                mytxt = "%s: [%s] %s" % (
                    bold(_("ATTENTION")),
                    purple(self.name),
                    red(_("updating repository metadata layout, please wait!")),
                )
                self.output(
                    mytxt,
                    importance = 1,
                    level = "warning"
                )

            done_something = True
            # need to add foreign key to this table
            cur = self._cursor().execute("""SELECT sql FROM sqlite_master
            WHERE type='table' and name = (?)""", (table,))
            cur_sql = cur.fetchone()[0]

            # change table name
            tmp_table = table+"_fk_sup"
            self._cursor().execute("DROP TABLE IF EXISTS %s" % (tmp_table,))

            # rewrite the original CREATE TABLE statement: keep the
            # column list, rename the table, append the FK clause
            bracket_idx = cur_sql.find("(")
            cur_sql = cur_sql[bracket_idx:]
            cur_sql = "CREATE TABLE %s %s" % (tmp_table, cur_sql)

            # remove final parenthesis and strip
            cur_sql = cur_sql[:-1].strip()
            # add foreign key stmt
            cur_sql += """,
                FOREIGN KEY(idpackage) REFERENCES
                baseinfo(idpackage) ON DELETE CASCADE );"""
            self._cursor().executescript(cur_sql)
            self._moveContent(table, tmp_table)
            self._atomicRename(tmp_table, table)

        if done_something:
            self._setSetting("on_delete_cascade", "1")
            self._connection().commit()
            # recreate indexes
            self.createAllIndexes()
        elif foreign_keys_supported:
            # some devel version didn't have this set
            try:
                self.getSetting("on_delete_cascade")
            except KeyError:
                self._setSetting("on_delete_cascade", "1")
                self._connection().commit()
2666
2667 - def _moveContent(self, from_table, to_table):
2668 self._cursor().execute(""" 2669 INSERT INTO %s SELECT * FROM %s 2670 """ % (to_table, from_table,))
2671
2672 - def _atomicRename(self, from_table, to_table):
2673 self._cursor().executescript(""" 2674 BEGIN TRANSACTION; 2675 DROP TABLE IF EXISTS %s; 2676 ALTER TABLE %s RENAME TO %s; 2677 COMMIT; 2678 """ % (to_table, from_table, to_table,))
2679
2680 - def _migrateCountersTable(self):
2681 self._cursor().executescript(""" 2682 BEGIN TRANSACTION; 2683 DROP TABLE IF EXISTS counterstemp; 2684 CREATE TABLE counterstemp ( 2685 counter INTEGER, idpackage INTEGER, branch VARCHAR, 2686 PRIMARY KEY(idpackage,branch), 2687 FOREIGN KEY(idpackage) 2688 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2689 ); 2690 INSERT INTO counterstemp (counter, idpackage, branch) 2691 SELECT counter, idpackage, branch FROM counters; 2692 DROP TABLE IF EXISTS counters; 2693 ALTER TABLE counterstemp RENAME TO counters; 2694 COMMIT; 2695 """) 2696 self._clearLiveCache("_doesTableExist") 2697 self._clearLiveCache("_doesColumnInTableExist")
2698
2699 - def _createSettingsTable(self):
2700 self._cursor().executescript(""" 2701 CREATE TABLE settings ( 2702 setting_name VARCHAR, 2703 setting_value VARCHAR, 2704 PRIMARY KEY(setting_name) 2705 ); 2706 """) 2707 self._setupInitialSettings() 2708 self._clearLiveCache("_doesTableExist") 2709 self._clearLiveCache("_doesColumnInTableExist")
2710
    def _createProvidedLibs(self):
        """
        Populate the provided_libs table by scanning installed package
        content; if the scan fails, fall back to creating an empty
        provided_libs table.
        """

        def do_create():
            # fallback: create an empty provided_libs table
            self._cursor().executescript("""
            CREATE TABLE provided_libs (
                idpackage INTEGER,
                library VARCHAR,
                path VARCHAR,
                elfclass INTEGER,
                FOREIGN KEY(idpackage) REFERENCES baseinfo(idpackage)
                ON DELETE CASCADE
            );
            """)
            self._clearLiveCache("_doesTableExist")
            self._clearLiveCache("_doesColumnInTableExist")

        mytxt = "%s: %s" % (
            bold(_("ATTENTION")),
            red(_("generating provided_libs metadata, please wait!")),
        )
        self.output(
            mytxt,
            importance = 1,
            level = "warning"
        )

        try:
            self._generateProvidedLibsMetadata()
        except (IOError, OSError, Error) as err:
            # best effort: report the failure and fall back to an
            # empty table rather than aborting the schema update
            mytxt = "%s: %s: [%s]" % (
                bold(_("ATTENTION")),
                red("cannot generate provided_libs metadata"),
                err,
            )
            self.output(
                mytxt,
                importance = 1,
                level = "warning"
            )
            do_create()
2752 - def _createPreservedLibsTable(self):
2753 self._cursor().executescript(""" 2754 CREATE TABLE preserved_libs ( 2755 library VARCHAR, 2756 elfclass INTEGER, 2757 path VARCHAR, 2758 atom VARCHAR, 2759 PRIMARY KEY (library, path, elfclass) 2760 ); 2761 """) 2762 self._clearLiveCache("_doesTableExist") 2763 self._clearLiveCache("_doesColumnInTableExist")
2764
2765 - def _createPreservedLibsAtomColumn(self):
2766 self._cursor().execute(""" 2767 ALTER TABLE preserved_libs ADD atom VARCHAR; 2768 """) 2769 self._clearLiveCache("_doesColumnInTableExist")
2770
2771 - def _createPackageDownloadsTable(self):
2772 self._cursor().executescript(""" 2773 CREATE TABLE packagedownloads ( 2774 idpackage INTEGER, 2775 download VARCHAR, 2776 type VARCHAR, 2777 size INTEGER, 2778 disksize INTEGER, 2779 md5 VARCHAR, 2780 sha1 VARCHAR, 2781 sha256 VARCHAR, 2782 sha512 VARCHAR, 2783 gpg BLOB, 2784 FOREIGN KEY(idpackage) 2785 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2786 ); 2787 """) 2788 self._clearLiveCache("_doesTableExist") 2789 self._clearLiveCache("_doesColumnInTableExist")
2790
    def _generateProvidedLibsMetadata(self):
        """
        Rebuild provided_libs metadata by scanning every installed
        package's content for ELF objects living inside the linker
        search paths, then atomically swap the temporary table in.
        """

        def collect_provided(pkg_dir, content):
            # return the set of (library name, path, elf class) tuples
            # for ELF files found under the linker search paths

            provided_libs = set()
            ldpaths = set(entropy.tools.collect_linker_paths())
            for obj, ftype in list(content.items()):

                if ftype == "dir":
                    continue
                obj_dir, obj_name = os.path.split(obj)

                if obj_dir not in ldpaths:
                    continue

                unpack_obj = os.path.join(pkg_dir, obj)
                try:
                    os.stat(unpack_obj)
                except OSError:
                    # file vanished or unreadable, skip it
                    continue

                # do not trust ftype
                if os.path.isdir(unpack_obj):
                    continue
                if not entropy.tools.is_elf_file(unpack_obj):
                    continue

                elf_class = entropy.tools.read_elf_class(unpack_obj)
                provided_libs.add((obj_name, obj, elf_class,))

            return provided_libs

        self._cursor().executescript("""
        DROP TABLE IF EXISTS provided_libs_tmp;
        CREATE TABLE provided_libs_tmp (
            idpackage INTEGER,
            library VARCHAR,
            path VARCHAR,
            elfclass INTEGER,
            FOREIGN KEY(idpackage) REFERENCES baseinfo(idpackage)
            ON DELETE CASCADE
        );
        """)

        pkgs = self.listAllPackageIds()
        for package_id in pkgs:

            content = self.retrieveContent(package_id, extended = True,
                formatted = True)
            provided_libs = collect_provided(etpConst['systemroot'], content)

            self._cursor().executemany("""
            INSERT INTO provided_libs_tmp VALUES (?,?,?,?)
            """, [(package_id, x, y, z,) for x, y, z in provided_libs])

        # rename
        self._cursor().execute("""
        ALTER TABLE provided_libs_tmp RENAME TO provided_libs;
        """)
        # make sure that live_cache reports correct info regarding tables
        self._clearLiveCache("_doesTableExist")
        self._clearLiveCache("_doesColumnInTableExist")
2854 - def _createProvideDefault(self):
2855 self._cursor().execute(""" 2856 ALTER TABLE provide ADD COLUMN is_default INTEGER DEFAULT 0 2857 """) 2858 self._clearLiveCache("_doesTableExist") 2859 self._clearLiveCache("_doesColumnInTableExist")
2860
2861 - def _createInstalledTableSource(self):
2862 self._cursor().execute(""" 2863 ALTER TABLE installedtable ADD source INTEGER; 2864 """) 2865 self._cursor().execute(""" 2866 UPDATE installedtable SET source = (?) 2867 """, (etpConst['install_sources']['unknown'],)) 2868 self._clearLiveCache("getInstalledPackageRepository") 2869 self._clearLiveCache("getInstalledPackageSource") 2870 self._clearLiveCache("_doesTableExist") 2871 self._clearLiveCache("_doesColumnInTableExist")
2872
2873 - def _createPackagechangelogsTable(self):
2874 self._cursor().execute(""" 2875 CREATE TABLE packagechangelogs ( category VARCHAR, 2876 name VARCHAR, changelog BLOB, PRIMARY KEY (category, name)); 2877 """) 2878 self._clearLiveCache("_doesTableExist") 2879 self._clearLiveCache("_doesColumnInTableExist")
2880
2881 - def _createAutomergefilesTable(self):
2882 self._cursor().execute(""" 2883 CREATE TABLE automergefiles ( idpackage INTEGER, 2884 configfile VARCHAR, md5 VARCHAR, 2885 FOREIGN KEY(idpackage) REFERENCES baseinfo(idpackage) 2886 ON DELETE CASCADE ); 2887 """) 2888 self._clearLiveCache("_doesTableExist") 2889 self._clearLiveCache("_doesColumnInTableExist")
2890
2891 - def _createPackagesignaturesTable(self):
2892 self._cursor().execute(""" 2893 CREATE TABLE packagesignatures ( 2894 idpackage INTEGER PRIMARY KEY, 2895 sha1 VARCHAR, 2896 sha256 VARCHAR, 2897 sha512 VARCHAR, 2898 gpg BLOB, 2899 FOREIGN KEY(idpackage) 2900 REFERENCES baseinfo(idpackage) ON DELETE CASCADE ); 2901 """) 2902 self._clearLiveCache("_doesTableExist") 2903 self._clearLiveCache("_doesColumnInTableExist")
2904
2905 - def _createPackagesignaturesGpgColumn(self):
2906 self._cursor().execute(""" 2907 ALTER TABLE packagesignatures ADD gpg BLOB; 2908 """) 2909 self._clearLiveCache("_doesColumnInTableExist")
2910
2911 - def _createPackagespmphases(self):
2912 self._cursor().execute(""" 2913 CREATE TABLE packagespmphases ( 2914 idpackage INTEGER PRIMARY KEY, 2915 phases VARCHAR, 2916 FOREIGN KEY(idpackage) 2917 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2918 ); 2919 """) 2920 self._clearLiveCache("_doesTableExist") 2921 self._clearLiveCache("_doesColumnInTableExist")
2922
2923 - def _createPackagespmrepository(self):
2924 self._cursor().execute(""" 2925 CREATE TABLE packagespmrepository ( 2926 idpackage INTEGER PRIMARY KEY, 2927 repository VARCHAR, 2928 FOREIGN KEY(idpackage) 2929 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2930 ); 2931 """) 2932 self._clearLiveCache("_doesTableExist") 2933 self._clearLiveCache("_doesColumnInTableExist")
2934
2935 - def _createEntropyBranchMigrationTable(self):
2936 self._cursor().execute(""" 2937 CREATE TABLE entropy_branch_migration ( 2938 repository VARCHAR, 2939 from_branch VARCHAR, 2940 to_branch VARCHAR, 2941 post_migration_md5sum VARCHAR, 2942 post_upgrade_md5sum VARCHAR, 2943 PRIMARY KEY (repository, from_branch, to_branch) 2944 ); 2945 """) 2946 self._clearLiveCache("_doesTableExist") 2947 self._clearLiveCache("_doesColumnInTableExist")
2948
2949 - def _createPackagesetsTable(self):
2950 self._cursor().execute(""" 2951 CREATE TABLE packagesets ( setname VARCHAR, dependency VARCHAR ); 2952 """) 2953 self._clearLiveCache("_doesTableExist") 2954 self._clearLiveCache("_doesColumnInTableExist")
2955
2956 - def _createPackageDesktopMimeTable(self):
2957 self._cursor().execute(""" 2958 CREATE TABLE packagedesktopmime ( 2959 idpackage INTEGER, 2960 name VARCHAR, 2961 mimetype VARCHAR, 2962 executable VARCHAR, 2963 icon VARCHAR, 2964 FOREIGN KEY(idpackage) 2965 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2966 ); 2967 """) 2968 self._clearLiveCache("_doesTableExist") 2969 self._clearLiveCache("_doesColumnInTableExist")
2970
2971 - def _createProvidedMimeTable(self):
2972 self._cursor().execute(""" 2973 CREATE TABLE provided_mime ( 2974 mimetype VARCHAR, 2975 idpackage INTEGER, 2976 FOREIGN KEY(idpackage) 2977 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2978 ); 2979 """) 2980 self._clearLiveCache("_doesTableExist") 2981 self._clearLiveCache("_doesColumnInTableExist")
2982
2983 - def _createLicensesAcceptedTable(self):
2984 self._cursor().execute(""" 2985 CREATE TABLE licenses_accepted ( licensename VARCHAR UNIQUE ); 2986 """) 2987 self._clearLiveCache("_doesTableExist") 2988 self._clearLiveCache("_doesColumnInTableExist")
2989
2990 - def _createContentSafetyTable(self):
2991 self._cursor().execute(""" 2992 CREATE TABLE contentsafety ( 2993 idpackage INTEGER, 2994 file VARCHAR, 2995 mtime FLOAT, 2996 sha256 VARCHAR, 2997 FOREIGN KEY(idpackage) 2998 REFERENCES baseinfo(idpackage) ON DELETE CASCADE 2999 ); 3000 """) 3001 self._clearLiveCache("_doesTableExist") 3002 self._clearLiveCache("_doesColumnInTableExist")
3003