Mercurial > p > roundup > code
comparison roundup/backends/back_sqlite.py @ 6050:19a58adf5d1f
Flake8: whitespace; don't import base64; unused loop var gets leading _
| author | John Rouillard <rouilj@ieee.org> |
|---|---|
| date | Sat, 11 Jan 2020 16:50:44 -0500 |
| parents | 16e1255b16cf |
| children | 91ab3e0ffcd0 |
comparison
equal
deleted
inserted
replaced
| 6049:99d4fb22aa65 | 6050:19a58adf5d1f |
|---|---|
| 6 NOTE: we use the rdbms_common table creation methods which define datatypes | 6 NOTE: we use the rdbms_common table creation methods which define datatypes |
| 7 for the columns, but sqlite IGNORES these specifications. | 7 for the columns, but sqlite IGNORES these specifications. |
| 8 """ | 8 """ |
| 9 __docformat__ = 'restructuredtext' | 9 __docformat__ = 'restructuredtext' |
| 10 | 10 |
| 11 import os, base64, marshal, shutil, time, logging | 11 import os, marshal, shutil, time, logging |
| 12 | 12 |
| 13 from roundup import hyperdb, date, password | 13 from roundup import hyperdb, date, password |
| 14 from roundup.backends import rdbms_common | 14 from roundup.backends import rdbms_common |
| 15 from roundup.backends.sessions_dbm import Sessions, OneTimeKeys | 15 from roundup.backends.sessions_dbm import Sessions, OneTimeKeys |
| 16 from roundup.anypy.strings import uany2s | 16 from roundup.anypy.strings import uany2s |
| 20 import sqlite3 as sqlite | 20 import sqlite3 as sqlite |
| 21 sqlite_version = 3 | 21 sqlite_version = 3 |
| 22 except ImportError: | 22 except ImportError: |
| 23 try: | 23 try: |
| 24 from pysqlite2 import dbapi2 as sqlite | 24 from pysqlite2 import dbapi2 as sqlite |
| 25 if sqlite.version_info < (2,1,0): | 25 if sqlite.version_info < (2, 1, 0): |
| 26 raise ValueError('pysqlite2 minimum version is 2.1.0+ ' | 26 raise ValueError('pysqlite2 minimum version is 2.1.0+ ' |
| 27 '- %s found'%sqlite.version) | 27 '- %s found' % sqlite.version) |
| 28 sqlite_version = 2 | 28 sqlite_version = 2 |
| 29 except ImportError: | 29 except ImportError: |
| 30 import sqlite | 30 import sqlite |
| 31 sqlite_version = 1 | 31 sqlite_version = 1 |
| 32 | 32 |
| 33 | |
| 33 def db_exists(config): | 34 def db_exists(config): |
| 34 return os.path.exists(os.path.join(config.DATABASE, 'db')) | 35 return os.path.exists(os.path.join(config.DATABASE, 'db')) |
| 35 | 36 |
| 37 | |
| 36 def db_nuke(config): | 38 def db_nuke(config): |
| 37 shutil.rmtree(config.DATABASE) | 39 shutil.rmtree(config.DATABASE) |
| 40 | |
| 38 | 41 |
| 39 class Database(rdbms_common.Database): | 42 class Database(rdbms_common.Database): |
| 40 """Sqlite DB backend implementation | 43 """Sqlite DB backend implementation |
| 41 | 44 |
| 42 attributes: | 45 attributes: |
| 45 identify the database type so it can import the correct indexer | 48 identify the database type so it can import the correct indexer |
| 46 module when using native text search mode. | 49 module when using native text search mode. |
| 47 """ | 50 """ |
| 48 | 51 |
| 49 # char to use for positional arguments | 52 # char to use for positional arguments |
| 50 if sqlite_version in (2,3): | 53 if sqlite_version in (2, 3): |
| 51 arg = '?' | 54 arg = '?' |
| 52 else: | 55 else: |
| 53 arg = '%s' | 56 arg = '%s' |
| 54 | 57 |
| 55 dbtype = "sqlite" | 58 dbtype = "sqlite" |
| 63 # is set to False here. | 66 # is set to False here. |
| 64 | 67 |
| 65 implements_double_precision = False | 68 implements_double_precision = False |
| 66 | 69 |
| 67 hyperdb_to_sql_datatypes = { | 70 hyperdb_to_sql_datatypes = { |
| 68 hyperdb.String : 'VARCHAR(255)', | 71 hyperdb.String : 'VARCHAR(255)', |
| 69 hyperdb.Date : 'VARCHAR(30)', | 72 hyperdb.Date : 'VARCHAR(30)', |
| 70 hyperdb.Link : 'INTEGER', | 73 hyperdb.Link : 'INTEGER', |
| 71 hyperdb.Interval : 'VARCHAR(255)', | 74 hyperdb.Interval : 'VARCHAR(255)', |
| 72 hyperdb.Password : 'VARCHAR(255)', | 75 hyperdb.Password : 'VARCHAR(255)', |
| 73 hyperdb.Boolean : 'BOOLEAN', | 76 hyperdb.Boolean : 'BOOLEAN', |
| 74 hyperdb.Number : 'REAL', | 77 hyperdb.Number : 'REAL', |
| 75 hyperdb.Integer : 'INTEGER', | 78 hyperdb.Integer : 'INTEGER', |
| 76 } | 79 } |
| 77 hyperdb_to_sql_value = { | 80 hyperdb_to_sql_value = { |
| 78 hyperdb.String : str, | 81 hyperdb.String : str, |
| 79 hyperdb.Date : lambda x: x.serialise(), | 82 hyperdb.Date : lambda x: x.serialise(), |
| 80 hyperdb.Link : int, | 83 hyperdb.Link : int, |
| 81 hyperdb.Interval : str, | 84 hyperdb.Interval : str, |
| 82 hyperdb.Password : str, | 85 hyperdb.Password : str, |
| 83 hyperdb.Boolean : int, | 86 hyperdb.Boolean : int, |
| 84 hyperdb.Integer : int, | 87 hyperdb.Integer : int, |
| 85 hyperdb.Number : lambda x: x, | 88 hyperdb.Number : lambda x: x, |
| 86 hyperdb.Multilink : lambda x: x, # used in journal marshalling | 89 hyperdb.Multilink : lambda x: x, # used in journal marshalling |
| 87 } | 90 } |
| 88 sql_to_hyperdb_value = { | 91 sql_to_hyperdb_value = { |
| 89 hyperdb.String : uany2s, | 92 hyperdb.String : uany2s, |
| 90 hyperdb.Date : lambda x: date.Date(str(x)), | 93 hyperdb.Date : lambda x: date.Date(str(x)), |
| 91 hyperdb.Link : str, # XXX numeric ids | 94 hyperdb.Link : str, # XXX numeric ids |
| 92 hyperdb.Interval : date.Interval, | 95 hyperdb.Interval : date.Interval, |
| 93 hyperdb.Password : lambda x: password.Password(encrypted=x), | 96 hyperdb.Password : lambda x: password.Password(encrypted=x), |
| 94 hyperdb.Boolean : int, | 97 hyperdb.Boolean : int, |
| 95 hyperdb.Integer : int, | 98 hyperdb.Integer : int, |
| 96 hyperdb.Number : rdbms_common._num_cvt, | 99 hyperdb.Number : rdbms_common._num_cvt, |
| 134 # database itself will be created by sqlite if needed | 137 # database itself will be created by sqlite if needed |
| 135 if not os.path.isdir(self.config.DATABASE): | 138 if not os.path.isdir(self.config.DATABASE): |
| 136 os.makedirs(self.config.DATABASE) | 139 os.makedirs(self.config.DATABASE) |
| 137 | 140 |
| 138 db = os.path.join(self.config.DATABASE, 'db') | 141 db = os.path.join(self.config.DATABASE, 'db') |
| 139 logging.getLogger('roundup.hyperdb').info('open database %r'%db) | 142 logging.getLogger('roundup.hyperdb').info('open database %r' % db) |
| 140 # set timeout (30 second default is extraordinarily generous) | 143 # set timeout (30 second default is extraordinarily generous) |
| 141 # for handling locked database | 144 # for handling locked database |
| 142 if sqlite_version == 1: | 145 if sqlite_version == 1: |
| 143 conn = sqlite.connect(db=db) | 146 conn = sqlite.connect(db=db) |
| 144 conn.db.sqlite_busy_handler(self.sqlite_busy_handler) | 147 conn.db.sqlite_busy_handler(self.sqlite_busy_handler) |
| 172 self.sql('create index ids_name_idx on ids(name)') | 175 self.sql('create index ids_name_idx on ids(name)') |
| 173 self.create_version_2_tables() | 176 self.create_version_2_tables() |
| 174 | 177 |
| 175 def create_version_2_tables(self): | 178 def create_version_2_tables(self): |
| 176 self.sql('create table otks (otk_key varchar, ' | 179 self.sql('create table otks (otk_key varchar, ' |
| 177 'otk_value varchar, otk_time integer)') | 180 'otk_value varchar, otk_time integer)') |
| 178 self.sql('create index otks_key_idx on otks(otk_key)') | 181 self.sql('create index otks_key_idx on otks(otk_key)') |
| 179 self.sql('create table sessions (session_key varchar, ' | 182 self.sql('create table sessions (session_key varchar, ' |
| 180 'session_time integer, session_value varchar)') | 183 'session_time integer, session_value varchar)') |
| 181 self.sql('create index sessions_key_idx on ' | 184 self.sql('create index sessions_key_idx on ' |
| 182 'sessions(session_key)') | 185 'sessions(session_key)') |
| 183 | 186 |
| 184 # full-text indexing store | 187 # full-text indexing store |
| 185 self.sql('CREATE TABLE __textids (_class varchar, ' | 188 self.sql('CREATE TABLE __textids (_class varchar, ' |
| 186 '_itemid varchar, _prop varchar, _textid integer primary key) ') | 189 '_itemid varchar, _prop varchar, _textid' |
| 190 ' integer primary key) ') | |
| 187 self.sql('CREATE TABLE __words (_word varchar, ' | 191 self.sql('CREATE TABLE __words (_word varchar, ' |
| 188 '_textid integer)') | 192 '_textid integer)') |
| 189 self.sql('CREATE INDEX words_word_ids ON __words(_word)') | 193 self.sql('CREATE INDEX words_word_ids ON __words(_word)') |
| 190 self.sql('CREATE INDEX words_by_id ON __words (_textid)') | 194 self.sql('CREATE INDEX words_by_id ON __words (_textid)') |
| 191 self.sql('CREATE UNIQUE INDEX __textids_by_props ON ' | 195 self.sql('CREATE UNIQUE INDEX __textids_by_props ON ' |
| 192 '__textids (_class, _itemid, _prop)') | 196 '__textids (_class, _itemid, _prop)') |
| 193 sql = 'insert into ids (name, num) values (%s,%s)'%(self.arg, self.arg) | 197 sql = 'insert into ids (name, num) values (%s,%s)' % ( |
| 198 self.arg, self.arg) | |
| 194 self.sql(sql, ('__textids', 1)) | 199 self.sql(sql, ('__textids', 1)) |
| 195 | 200 |
| 196 def add_new_columns_v2(self): | 201 def add_new_columns_v2(self): |
| 197 # update existing tables to have the new actor column | 202 # update existing tables to have the new actor column |
| 198 tables = self.database_schema['tables'] | 203 tables = self.database_schema['tables'] |
| 222 if not force and new_spec == old_spec: | 227 if not force and new_spec == old_spec: |
| 223 # no changes | 228 # no changes |
| 224 return 0 | 229 return 0 |
| 225 | 230 |
| 226 logging.getLogger('roundup.hyperdb').info( | 231 logging.getLogger('roundup.hyperdb').info( |
| 227 'update_class %s'%spec.classname) | 232 'update_class %s' % spec.classname) |
| 228 | 233 |
| 229 # detect multilinks that have been removed, and drop their table | 234 # detect multilinks that have been removed, and drop their table |
| 230 old_has = {} | 235 old_has = {} |
| 231 for name, prop in old_spec[1]: | 236 for name, prop in old_spec[1]: |
| 232 old_has[name] = 1 | 237 old_has[name] = 1 |
| 233 if name in spec.properties or not isinstance(prop, hyperdb.Multilink): | 238 if name in spec.properties or not \ |
| 239 isinstance(prop, hyperdb.Multilink): | |
| 234 continue | 240 continue |
| 235 # it's a multilink, and it's been removed - drop the old | 241 # it's a multilink, and it's been removed - drop the old |
| 236 # table. First drop indexes. | 242 # table. First drop indexes. |
| 237 self.drop_multilink_table_indexes(spec.classname, name) | 243 self.drop_multilink_table_indexes(spec.classname, name) |
| 238 sql = 'drop table %s_%s'%(spec.classname, prop) | 244 sql = 'drop table %s_%s' % (spec.classname, prop) |
| 239 self.sql(sql) | 245 self.sql(sql) |
| 240 | 246 |
| 241 # now figure how we populate the new table | 247 # now figure how we populate the new table |
| 242 if adding_v2: | 248 if adding_v2: |
| 243 fetch = ['_activity', '_creation', '_creator'] | 249 fetch = ['_activity', '_creation', '_creator'] |
| 244 else: | 250 else: |
| 245 fetch = ['_actor', '_activity', '_creation', '_creator'] | 251 fetch = ['_actor', '_activity', '_creation', '_creator'] |
| 246 properties = spec.getprops() | 252 properties = spec.getprops() |
| 247 for propname,x in new_spec[1]: | 253 for propname, _x in new_spec[1]: |
| 248 prop = properties[propname] | 254 prop = properties[propname] |
| 249 if isinstance(prop, hyperdb.Multilink): | 255 if isinstance(prop, hyperdb.Multilink): |
| 250 if propname not in old_has: | 256 if propname not in old_has: |
| 251 # we need to create the new table | 257 # we need to create the new table |
| 252 self.create_multilink_table(spec, propname) | 258 self.create_multilink_table(spec, propname) |
| 253 elif force: | 259 elif force: |
| 254 tn = '%s_%s'%(spec.classname, propname) | 260 tn = '%s_%s' % (spec.classname, propname) |
| 255 # grabe the current values | 261 # grabe the current values |
| 256 sql = 'select linkid, nodeid from %s'%tn | 262 sql = 'select linkid, nodeid from %s' % tn |
| 257 self.sql(sql) | 263 self.sql(sql) |
| 258 rows = self.cursor.fetchall() | 264 rows = self.cursor.fetchall() |
| 259 | 265 |
| 260 # drop the old table | 266 # drop the old table |
| 261 self.drop_multilink_table_indexes(spec.classname, propname) | 267 self.drop_multilink_table_indexes(spec.classname, propname) |
| 262 sql = 'drop table %s'%tn | 268 sql = 'drop table %s' % tn |
| 263 self.sql(sql) | 269 self.sql(sql) |
| 264 | 270 |
| 265 # re-create and populate the new table | 271 # re-create and populate the new table |
| 266 self.create_multilink_table(spec, propname) | 272 self.create_multilink_table(spec, propname) |
| 267 sql = """insert into %s (linkid, nodeid) values | 273 sql = """insert into %s (linkid, nodeid) values |
| 268 (%s, %s)"""%(tn, self.arg, self.arg) | 274 (%s, %s)""" % (tn, self.arg, self.arg) |
| 269 for linkid, nodeid in rows: | 275 for linkid, nodeid in rows: |
| 270 self.sql(sql, (int(linkid), int(nodeid))) | 276 self.sql(sql, (int(linkid), int(nodeid))) |
| 271 elif propname in old_has: | 277 elif propname in old_has: |
| 272 # we copy this col over from the old table | 278 # we copy this col over from the old table |
| 273 fetch.append('_'+propname) | 279 fetch.append('_'+propname) |
| 275 # select the data out of the old table | 281 # select the data out of the old table |
| 276 fetch.append('id') | 282 fetch.append('id') |
| 277 fetch.append('__retired__') | 283 fetch.append('__retired__') |
| 278 fetchcols = ','.join(fetch) | 284 fetchcols = ','.join(fetch) |
| 279 cn = spec.classname | 285 cn = spec.classname |
| 280 sql = 'select %s from _%s'%(fetchcols, cn) | 286 sql = 'select %s from _%s' % (fetchcols, cn) |
| 281 self.sql(sql) | 287 self.sql(sql) |
| 282 olddata = self.cursor.fetchall() | 288 olddata = self.cursor.fetchall() |
| 283 | 289 |
| 284 # TODO: update all the other index dropping code | 290 # TODO: update all the other index dropping code |
| 285 self.drop_class_table_indexes(cn, old_spec[0]) | 291 self.drop_class_table_indexes(cn, old_spec[0]) |
| 286 | 292 |
| 287 # drop the old table | 293 # drop the old table |
| 288 self.sql('drop table _%s'%cn) | 294 self.sql('drop table _%s' % cn) |
| 289 | 295 |
| 290 # create the new table | 296 # create the new table |
| 291 self.create_class_table(spec) | 297 self.create_class_table(spec) |
| 292 | 298 |
| 293 if olddata: | 299 if olddata: |
| 294 inscols = ['id', '_actor', '_activity', '_creation', '_creator', '__retired__'] | 300 inscols = ['id', '_actor', '_activity', '_creation', |
| 295 for propname,x in new_spec[1]: | 301 '_creator', '__retired__'] |
| 302 for propname, _x in new_spec[1]: | |
| 296 prop = properties[propname] | 303 prop = properties[propname] |
| 297 if isinstance(prop, hyperdb.Multilink): | 304 if isinstance(prop, hyperdb.Multilink): |
| 298 continue | 305 continue |
| 299 elif isinstance(prop, hyperdb.Interval): | 306 elif isinstance(prop, hyperdb.Interval): |
| 300 inscols.append('_'+propname) | 307 inscols.append('_'+propname) |
| 305 | 312 |
| 306 # do the insert of the old data - the new columns will have | 313 # do the insert of the old data - the new columns will have |
| 307 # NULL values | 314 # NULL values |
| 308 args = ','.join([self.arg for x in inscols]) | 315 args = ','.join([self.arg for x in inscols]) |
| 309 cols = ','.join(inscols) | 316 cols = ','.join(inscols) |
| 310 sql = 'insert into _%s (%s) values (%s)'%(cn, cols, args) | 317 sql = 'insert into _%s (%s) values (%s)' % (cn, cols, args) |
| 311 for entry in olddata: | 318 for entry in olddata: |
| 312 d = [] | 319 d = [] |
| 313 retired_id = None | 320 retired_id = None |
| 314 for name in inscols: | 321 for name in inscols: |
| 315 # generate the new value for the Interval int column | 322 # generate the new value for the Interval int column |
| 316 if name.endswith('_int__'): | 323 if name.endswith('_int__'): |
| 317 name = name[2:-6] | 324 name = name[2:-6] |
| 318 if sqlite_version in (2,3): | 325 if sqlite_version in (2, 3): |
| 319 try: | 326 try: |
| 320 v = hyperdb.Interval(entry[name]).as_seconds() | 327 v = hyperdb.Interval(entry[name]).as_seconds() |
| 321 except IndexError: | 328 except IndexError: |
| 322 v = None | 329 v = None |
| 323 elif name in entry: | 330 elif name in entry: |
| 324 v = hyperdb.Interval(entry[name]).as_seconds() | 331 v = hyperdb.Interval(entry[name]).as_seconds() |
| 325 else: | 332 else: |
| 326 v = None | 333 v = None |
| 327 elif sqlite_version in (2,3): | 334 elif sqlite_version in (2, 3): |
| 328 try: | 335 try: |
| 329 v = entry[name] | 336 v = entry[name] |
| 330 except IndexError: | 337 except IndexError: |
| 331 v = None | 338 v = None |
| 332 elif (sqlite_version == 1 and name in entry): | 339 elif (sqlite_version == 1 and name in entry): |
| 333 v = entry[name] | 340 v = entry[name] |
| 334 else: | 341 else: |
| 335 v = None | 342 v = None |
| 336 if name == 'id': | 343 if name == 'id': |
| 337 retired_id = v | 344 retired_id = v |
| 338 elif name == '__retired__' and retired_id and v not in ['0', 0]: | 345 elif name == '__retired__' and retired_id and \ |
| 346 v not in ['0', 0]: | |
| 339 v = retired_id | 347 v = retired_id |
| 340 d.append(v) | 348 d.append(v) |
| 341 self.sql(sql, tuple(d)) | 349 self.sql(sql, tuple(d)) |
| 342 | 350 |
| 343 return 1 | 351 return 1 |
| 361 except sqlite.ProgrammingError as value: | 369 except sqlite.ProgrammingError as value: |
| 362 if str(value) != 'rollback failed - Connection is closed.': | 370 if str(value) != 'rollback failed - Connection is closed.': |
| 363 raise | 371 raise |
| 364 | 372 |
| 365 def __repr__(self): | 373 def __repr__(self): |
| 366 return '<roundlite 0x%x>'%id(self) | 374 return '<roundlite 0x%x>' % id(self) |
| 367 | 375 |
| 368 def sql_commit(self): | 376 def sql_commit(self): |
| 369 """ Actually commit to the database. | 377 """ Actually commit to the database. |
| 370 | 378 |
| 371 Ignore errors if there's nothing to commit. | 379 Ignore errors if there's nothing to commit. |
| 377 raise | 385 raise |
| 378 # open a new cursor for subsequent work | 386 # open a new cursor for subsequent work |
| 379 self.cursor = self.conn.cursor() | 387 self.cursor = self.conn.cursor() |
| 380 | 388 |
| 381 def sql_index_exists(self, table_name, index_name): | 389 def sql_index_exists(self, table_name, index_name): |
| 382 self.sql('pragma index_list(%s)'%table_name) | 390 self.sql('pragma index_list(%s)' % table_name) |
| 383 for entry in self.cursor.fetchall(): | 391 for entry in self.cursor.fetchall(): |
| 384 if entry[1] == index_name: | 392 if entry[1] == index_name: |
| 385 return 1 | 393 return 1 |
| 386 return 0 | 394 return 0 |
| 387 | 395 |
| 397 # Defeat pysqlite's attempts to do locking by setting | 405 # Defeat pysqlite's attempts to do locking by setting |
| 398 # isolation_level to None. Pysqlite can commit | 406 # isolation_level to None. Pysqlite can commit |
| 399 # on it's own even if we don't want it to end the transaction. | 407 # on it's own even if we don't want it to end the transaction. |
| 400 # If we rewrite to use another sqlite library like apsw we | 408 # If we rewrite to use another sqlite library like apsw we |
| 401 # don't have to deal with this autocommit/autotransact foolishness. | 409 # don't have to deal with this autocommit/autotransact foolishness. |
| 402 self.conn.isolation_level = None; | 410 self.conn.isolation_level = None |
| 403 # Manage the transaction locks manually. | 411 # Manage the transaction locks manually. |
| 404 self.sql("BEGIN IMMEDIATE"); | 412 self.sql("BEGIN IMMEDIATE") |
| 405 | 413 |
| 406 # get the next ID | 414 # get the next ID |
| 407 sql = 'select num from ids where name=%s'%self.arg | 415 sql = 'select num from ids where name=%s' % self.arg |
| 408 self.sql(sql, (classname, )) | 416 self.sql(sql, (classname, )) |
| 409 newid = int(self.cursor.fetchone()[0]) | 417 newid = int(self.cursor.fetchone()[0]) |
| 410 | 418 |
| 411 # leave the next larger number as the next newid | 419 # leave the next larger number as the next newid |
| 412 sql = 'update ids set num=num+1 where name=%s'%self.arg | 420 sql = 'update ids set num=num+1 where name=%s' % self.arg |
| 413 vals = (classname,) | 421 vals = (classname,) |
| 414 self.sql(sql, vals) | 422 self.sql(sql, vals) |
| 415 | 423 |
| 416 # reset pysqlite's auto transact stuff to default since the | 424 # reset pysqlite's auto transact stuff to default since the |
| 417 # rest of the code expects it. | 425 # rest of the code expects it. |
| 418 self.conn.isolation_level = ''; | 426 self.conn.isolation_level = '' |
| 419 # commit writing the data, clearing locks for other processes | 427 # commit writing the data, clearing locks for other processes |
| 420 # and create a new cursor to the database. | 428 # and create a new cursor to the database. |
| 421 self.sql_commit(); | 429 self.sql_commit() |
| 422 | 430 |
| 423 # return as string | 431 # return as string |
| 424 return str(newid) | 432 return str(newid) |
| 425 | 433 |
| 426 def setid(self, classname, setid): | 434 def setid(self, classname, setid): |
| 427 """ Set the id counter: used during import of database | 435 """ Set the id counter: used during import of database |
| 428 | 436 |
| 429 We add one to make it behave like the sequences in postgres. | 437 We add one to make it behave like the sequences in postgres. |
| 430 """ | 438 """ |
| 431 sql = 'update ids set num=%s where name=%s'%(self.arg, self.arg) | 439 sql = 'update ids set num=%s where name=%s' % (self.arg, self.arg) |
| 432 vals = (int(setid)+1, classname) | 440 vals = (int(setid)+1, classname) |
| 433 self.sql(sql, vals) | 441 self.sql(sql, vals) |
| 434 | 442 |
| 435 def clear(self): | 443 def clear(self): |
| 436 rdbms_common.Database.clear(self) | 444 rdbms_common.Database.clear(self) |
| 438 for cn in self.classes.keys(): | 446 for cn in self.classes.keys(): |
| 439 self.setid(cn, 0) | 447 self.setid(cn, 0) |
| 440 | 448 |
| 441 def create_class(self, spec): | 449 def create_class(self, spec): |
| 442 rdbms_common.Database.create_class(self, spec) | 450 rdbms_common.Database.create_class(self, spec) |
| 443 sql = 'insert into ids (name, num) values (%s, %s)'%(self.arg, self.arg) | 451 sql = 'insert into ids (name, num) values (%s, %s)' %( |
| 452 self.arg, self.arg) | |
| 444 vals = (spec.classname, 1) | 453 vals = (spec.classname, 1) |
| 445 self.sql(sql, vals) | 454 self.sql(sql, vals) |
| 446 | 455 |
| 447 if sqlite_version in (2,3): | 456 if sqlite_version in (2, 3): |
| 448 def load_journal(self, classname, cols, nodeid): | 457 def load_journal(self, classname, cols, nodeid): |
| 449 """We need to turn the sqlite3.Row into a tuple so it can be | 458 """We need to turn the sqlite3.Row into a tuple so it can be |
| 450 unpacked""" | 459 unpacked""" |
| 451 l = rdbms_common.Database.load_journal(self, | 460 l = rdbms_common.Database.load_journal(self, |
| 452 classname, cols, nodeid) | 461 classname, cols, nodeid) |
| 453 cols = range(5) | 462 cols = range(5) |
| 454 return [[row[col] for col in cols] for row in l] | 463 return [[row[col] for col in cols] for row in l] |
| 464 | |
| 455 | 465 |
| 456 class sqliteClass: | 466 class sqliteClass: |
| 457 def filter(self, *args, **kw): | 467 def filter(self, *args, **kw): |
| 458 """ If there's NO matches to a fetch, sqlite returns NULL | 468 """ If there's NO matches to a fetch, sqlite returns NULL |
| 459 instead of nothing | 469 instead of nothing |
| 460 """ | 470 """ |
| 461 return [f for f in rdbms_common.Class.filter(self, *args, **kw) if f] | 471 return [f for f in rdbms_common.Class.filter(self, *args, **kw) if f] |
| 462 | 472 |
| 473 | |
| 463 class Class(sqliteClass, rdbms_common.Class): | 474 class Class(sqliteClass, rdbms_common.Class): |
| 464 pass | 475 pass |
| 465 | 476 |
| 477 | |
| 466 class IssueClass(sqliteClass, rdbms_common.IssueClass): | 478 class IssueClass(sqliteClass, rdbms_common.IssueClass): |
| 467 pass | 479 pass |
| 468 | 480 |
| 481 | |
| 469 class FileClass(sqliteClass, rdbms_common.FileClass): | 482 class FileClass(sqliteClass, rdbms_common.FileClass): |
| 470 pass | 483 pass |
| 471 | 484 |
| 472 # vim: set et sts=4 sw=4 : | 485 # vim: set et sts=4 sw=4 : |
