Mercurial > p > roundup > code
view roundup/backends/sessions_sqlite.py @ 6804:25d08e15e3b4
issue2551224 - Replace dbm db for sessions/otks when using sqlite
Generate new sqlite db's for storing one time keys and session and
other ephemeral data.
| author | John Rouillard <rouilj@ieee.org> |
|---|---|
| date | Mon, 25 Jul 2022 15:30:36 -0400 |
| parents | |
| children | 375d40a9e730 |
line wrap: on
line source
"""This module defines a very basic store that's used by the CGI
interface to store session and one-time-key information.

Yes, it's called "sessions" - because originally it only defined a
session class. It's now also used for One Time Key handling too.

We needed to split commits to session/OTK database from commits on the
main db structures (user data). This required two connections to the
sqlite db, which wasn't supported. This module was created so sqlite
didn't have to use dbm for the session/otk data. It hopefully will
provide a performance speedup.
"""
__docformat__ = 'restructuredtext'

import os, time, logging

from roundup.anypy.html import html_escape as escape


class BasicDatabase:
    ''' Provide a nice encapsulation of an RDBMS table.

        Keys are id strings, values are automatically marshalled data.
    '''
    # Subclasses set this to the table/column prefix ('session', 'otk').
    name = None

    def __init__(self, db):
        """Open a connection for this store and create the backing
        table and index on first use.

        :param db: the main roundup database object; must provide
            ``sql_open_connection(dbname=...)`` and ``arg`` (the SQL
            parameter placeholder).
        """
        self.db = db
        self.conn, self.cursor = self.db.sql_open_connection(
            dbname=self.name)
        self.sql('''SELECT name FROM sqlite_master
            WHERE type='table' AND name='%ss';''' % self.name)
        table_exists = self.cursor.fetchone()
        if not table_exists:
            # First run: create table/rows etc.
            self.sql('''CREATE TABLE %(name)ss (%(name)s_key VARCHAR(255),
                %(name)s_value TEXT,
                %(name)s_time REAL)''' % {"name": self.name})
            self.sql('CREATE INDEX %(name)s_key_idx '
                     'ON %(name)ss(%(name)s_key)' % {"name": self.name})
            self.commit()

    def log_debug(self, msg, *args, **kwargs):
        """Log a message with level DEBUG."""
        logger = self.get_logger()
        logger.debug(msg, *args, **kwargs)

    def log_info(self, msg, *args, **kwargs):
        """Log a message with level INFO."""
        logger = self.get_logger()
        logger.info(msg, *args, **kwargs)

    def get_logger(self):
        """Return the logger for this database.

        Because getting a logger requires acquiring a lock, we want
        to do it only once and cache the result on the instance.
        """
        # BUGFIX: the original tested hasattr(self, '__logger'), but
        # ``self.__logger = ...`` is name-mangled to
        # ``_BasicDatabase__logger`` so the literal-string check never
        # matched and the cache never took effect. EAFP avoids the
        # mangling issue entirely.
        try:
            return self.__logger
        except AttributeError:
            self.__logger = logging.getLogger('roundup')
            return self.__logger

    def sql(self, sql, args=None, cursor=None):
        """ Execute the sql with the optional args.
        """
        self.log_debug('SQL %r %r' % (sql, args))
        if not cursor:
            cursor = self.cursor
        if args:
            cursor.execute(sql, args)
        else:
            cursor.execute(sql)

    def clear(self):
        """Remove every entry from the table."""
        self.cursor.execute('delete from %ss' % self.name)

    def exists(self, infoid):
        """Return a true (non-zero) value if infoid is a stored key."""
        n = self.name
        self.cursor.execute(
            'select count(*) from %ss where %s_key=%s' % (
                n, n, self.db.arg), (infoid,))
        return int(self.cursor.fetchone()[0])

    # Sentinel so get() can distinguish "no default supplied" from any
    # legitimate default value the caller might pass.
    _marker = []

    def get(self, infoid, value, default=_marker):
        """Return values[value] for the entry stored under infoid.

        If the entry is missing, return *default* when one was given,
        otherwise raise KeyError.
        """
        n = self.name
        self.cursor.execute(
            'select %s_value from %ss where %s_key=%s' % (
                n, n, n, self.db.arg), (infoid,))
        res = self.cursor.fetchone()
        if not res:
            # BUGFIX: compare the sentinel by identity. The original
            # used ``!=``; since the marker is [], any caller default
            # equal to an empty list would wrongly be treated as
            # "no default supplied".
            if default is not self._marker:
                return default
            raise KeyError('No such %s "%s"' % (self.name, escape(infoid)))
        # Values are stored via repr() in set(); eval() is acceptable
        # only because this data is written by set() below, never taken
        # from untrusted input.
        values = eval(res[0])
        return values.get(value, None)

    def getall(self, infoid):
        """Return the whole value dict stored under infoid.

        Raise KeyError if there is no such entry.
        """
        n = self.name
        self.cursor.execute(
            'select %s_value from %ss where %s_key=%s' % (
                n, n, n, self.db.arg), (infoid,))
        res = self.cursor.fetchone()
        if not res:
            raise KeyError('No such %s "%s"' % (self.name, escape(infoid)))
        # See get(): data written by set() via repr().
        return eval(res[0])

    def set(self, infoid, **newvalues):
        """ Store all newvalues under key infoid with a timestamp in
            database. If newvalues['__timestamp'] exists and is
            representable as a floating point number (i.e. could be
            generated by time.time()), that value is used for the
            <name>_time column in the database.
        """
        c = self.cursor
        n = self.name
        a = self.db.arg
        c.execute('select %s_value from %ss where %s_key=%s' % (
            n, n, n, a), (infoid,))
        res = c.fetchone()
        if res:
            values = eval(res[0])
        else:
            values = {}
        values.update(newvalues)

        if res:
            sql = 'update %ss set %s_value=%s where %s_key=%s' % (
                n, n, a, n, a)
            args = (repr(values), infoid)
        else:
            if '__timestamp' in newvalues:
                try:
                    # __timestamp must be representable as a float.
                    # Check it; fall back to "now" if it isn't.
                    timestamp = float(newvalues['__timestamp'])
                except ValueError:
                    timestamp = time.time()
            else:
                timestamp = time.time()
            sql = 'insert into %ss (%s_key, %s_time, %s_value) '\
                  'values (%s, %s, %s)' % (n, n, n, n, a, a, a)
            args = (infoid, timestamp, repr(values))
        c.execute(sql, args)

    def list(self):
        """Return a list of all stored keys."""
        c = self.cursor
        n = self.name
        c.execute('select %s_key from %ss' % (n, n))
        return [res[0] for res in c.fetchall()]

    def destroy(self, infoid):
        """Delete the entry stored under infoid (no error if absent)."""
        self.cursor.execute(
            'delete from %ss where %s_key=%s' % (
                self.name, self.name, self.db.arg), (infoid,))

    def updateTimestamp(self, infoid):
        """ don't update every hit - once a minute should be OK """
        now = time.time()
        self.cursor.execute(
            '''update %ss set %s_time=%s
               where %s_key=%s and %s_time < %s''' % (
                self.name, self.name, self.db.arg,
                self.name, self.db.arg,
                self.name, self.db.arg),
            (now, infoid, now - 60))

    def clean(self):
        ''' Remove session records that haven't been used for a week.
        '''
        now = time.time()
        week = 60 * 60 * 24 * 7
        old = now - week
        self.cursor.execute(
            'delete from %ss where %s_time < %s' % (
                self.name, self.name, self.db.arg), (old,))

    def lifetime(self, key_lifetime=None):
        """Return the proper timestamp for a key with key_lifetime
           specified in seconds.

        A record stored with this timestamp survives clean() for
        key_lifetime more seconds. With no key_lifetime the standard
        one-week lifetime is used (i.e. "now" is returned).
        """
        now = time.time()
        week = 60 * 60 * 24 * 7
        if key_lifetime is None:
            # Default to the standard one-week lifetime used by
            # set()/clean().
            key_lifetime = week
        # BUGFIX: the original returned ``now - week + lifetime``,
        # referencing the undefined name 'lifetime' (the method itself
        # is not in local scope) and raising NameError on every call.
        return now - week + key_lifetime

    def commit(self):
        """Commit the open transaction and get a fresh cursor."""
        logger = logging.getLogger('roundup.hyperdb.backend')
        logger.info('commit %s' % self.name)
        self.conn.commit()
        # open a new cursor for subsequent work
        self.cursor = self.conn.cursor()

    def close(self):
        """Close the underlying database connection."""
        self.conn.close()


class Sessions(BasicDatabase):
    name = 'session'


class OneTimeKeys(BasicDatabase):
    name = 'otk'

# vim: set et sts=4 sw=4 :
