Mercurial > p > roundup > code
comparison roundup/backends/back_sqlite.py @ 4357:13b3155869e0
Beginnings of a big code cleanup / modernisation to make 2to3 happy
| author | Richard Jones <richard@users.sourceforge.net> |
|---|---|
| date | Mon, 22 Feb 2010 05:26:57 +0000 |
| parents | 289f249ba192 |
| children | 3e35233ea93c |
comparison
equal
deleted
inserted
replaced
| 4356:05a65559d873 | 4357:13b3155869e0 |
|---|---|
| 158 | 158 |
| 159 def add_new_columns_v2(self): | 159 def add_new_columns_v2(self): |
| 160 # update existing tables to have the new actor column | 160 # update existing tables to have the new actor column |
| 161 tables = self.database_schema['tables'] | 161 tables = self.database_schema['tables'] |
| 162 for classname, spec in self.classes.items(): | 162 for classname, spec in self.classes.items(): |
| 163 if tables.has_key(classname): | 163 if classname in tables: |
| 164 dbspec = tables[classname] | 164 dbspec = tables[classname] |
| 165 self.update_class(spec, dbspec, force=1, adding_v2=1) | 165 self.update_class(spec, dbspec, force=1, adding_v2=1) |
| 166 # we've updated - don't try again | 166 # we've updated - don't try again |
| 167 tables[classname] = spec.schema() | 167 tables[classname] = spec.schema() |
| 168 | 168 |
| 177 If 'force' is true, update the database anyway. | 177 If 'force' is true, update the database anyway. |
| 178 | 178 |
| 179 SQLite doesn't have ALTER TABLE, so we have to copy and | 179 SQLite doesn't have ALTER TABLE, so we have to copy and |
| 180 regenerate the tables with the new schema. | 180 regenerate the tables with the new schema. |
| 181 """ | 181 """ |
| 182 new_has = spec.properties.has_key | |
| 183 new_spec = spec.schema() | 182 new_spec = spec.schema() |
| 184 new_spec[1].sort() | 183 new_spec[1].sort() |
| 185 old_spec[1].sort() | 184 old_spec[1].sort() |
| 186 if not force and new_spec == old_spec: | 185 if not force and new_spec == old_spec: |
| 187 # no changes | 186 # no changes |
| 191 | 190 |
| 192 # detect multilinks that have been removed, and drop their table | 191 # detect multilinks that have been removed, and drop their table |
| 193 old_has = {} | 192 old_has = {} |
| 194 for name, prop in old_spec[1]: | 193 for name, prop in old_spec[1]: |
| 195 old_has[name] = 1 | 194 old_has[name] = 1 |
| 196 if new_has(name) or not isinstance(prop, hyperdb.Multilink): | 195 if name in spec.properties or not isinstance(prop, hyperdb.Multilink): |
| 197 continue | 196 continue |
| 198 # it's a multilink, and it's been removed - drop the old | 197 # it's a multilink, and it's been removed - drop the old |
| 199 # table. First drop indexes. | 198 # table. First drop indexes. |
| 200 self.drop_multilink_table_indexes(spec.classname, name) | 199 self.drop_multilink_table_indexes(spec.classname, name) |
| 201 sql = 'drop table %s_%s'%(spec.classname, prop) | 200 sql = 'drop table %s_%s'%(spec.classname, prop) |
| 202 self.sql(sql) | 201 self.sql(sql) |
| 203 old_has = old_has.has_key | |
| 204 | 202 |
| 205 # now figure how we populate the new table | 203 # now figure how we populate the new table |
| 206 if adding_v2: | 204 if adding_v2: |
| 207 fetch = ['_activity', '_creation', '_creator'] | 205 fetch = ['_activity', '_creation', '_creator'] |
| 208 else: | 206 else: |
| 209 fetch = ['_actor', '_activity', '_creation', '_creator'] | 207 fetch = ['_actor', '_activity', '_creation', '_creator'] |
| 210 properties = spec.getprops() | 208 properties = spec.getprops() |
| 211 for propname,x in new_spec[1]: | 209 for propname,x in new_spec[1]: |
| 212 prop = properties[propname] | 210 prop = properties[propname] |
| 213 if isinstance(prop, hyperdb.Multilink): | 211 if isinstance(prop, hyperdb.Multilink): |
| 214 if not old_has(propname): | 212 if propname not in old_has: |
| 215 # we need to create the new table | 213 # we need to create the new table |
| 216 self.create_multilink_table(spec, propname) | 214 self.create_multilink_table(spec, propname) |
| 217 elif force: | 215 elif force: |
| 218 tn = '%s_%s'%(spec.classname, propname) | 216 tn = '%s_%s'%(spec.classname, propname) |
| 219 # grab the current values | 217 # grab the current values |
| 230 self.create_multilink_table(spec, propname) | 228 self.create_multilink_table(spec, propname) |
| 231 sql = """insert into %s (linkid, nodeid) values | 229 sql = """insert into %s (linkid, nodeid) values |
| 232 (%s, %s)"""%(tn, self.arg, self.arg) | 230 (%s, %s)"""%(tn, self.arg, self.arg) |
| 233 for linkid, nodeid in rows: | 231 for linkid, nodeid in rows: |
| 234 self.sql(sql, (int(linkid), int(nodeid))) | 232 self.sql(sql, (int(linkid), int(nodeid))) |
| 235 elif old_has(propname): | 233 elif propname in old_has: |
| 236 # we copy this col over from the old table | 234 # we copy this col over from the old table |
| 237 fetch.append('_'+propname) | 235 fetch.append('_'+propname) |
| 238 | 236 |
| 239 # select the data out of the old table | 237 # select the data out of the old table |
| 240 fetch.append('id') | 238 fetch.append('id') |
| 261 if isinstance(prop, hyperdb.Multilink): | 259 if isinstance(prop, hyperdb.Multilink): |
| 262 continue | 260 continue |
| 263 elif isinstance(prop, hyperdb.Interval): | 261 elif isinstance(prop, hyperdb.Interval): |
| 264 inscols.append('_'+propname) | 262 inscols.append('_'+propname) |
| 265 inscols.append('__'+propname+'_int__') | 263 inscols.append('__'+propname+'_int__') |
| 266 elif old_has(propname): | 264 elif propname in old_has: |
| 267 # we copy this col over from the old table | 265 # we copy this col over from the old table |
| 268 inscols.append('_'+propname) | 266 inscols.append('_'+propname) |
| 269 | 267 |
| 270 # do the insert of the old data - the new columns will have | 268 # do the insert of the old data - the new columns will have |
| 271 # NULL values | 269 # NULL values |
| 281 if sqlite_version in (2,3): | 279 if sqlite_version in (2,3): |
| 282 try: | 280 try: |
| 283 v = hyperdb.Interval(entry[name]).as_seconds() | 281 v = hyperdb.Interval(entry[name]).as_seconds() |
| 284 except IndexError: | 282 except IndexError: |
| 285 v = None | 283 v = None |
| 286 elif entry.has_key(name): | 284 elif name in entry: |
| 287 v = hyperdb.Interval(entry[name]).as_seconds() | 285 v = hyperdb.Interval(entry[name]).as_seconds() |
| 288 else: | 286 else: |
| 289 v = None | 287 v = None |
| 290 elif sqlite_version in (2,3): | 288 elif sqlite_version in (2,3): |
| 291 try: | 289 try: |
| 292 v = entry[name] | 290 v = entry[name] |
| 293 except IndexError: | 291 except IndexError: |
| 294 v = None | 292 v = None |
| 295 elif (sqlite_version == 1 and entry.has_key(name)): | 293 elif (sqlite_version == 1 and name in entry): |
| 296 v = entry[name] | 294 v = entry[name] |
| 297 else: | 295 else: |
| 298 v = None | 296 v = None |
| 299 d.append(v) | 297 d.append(v) |
| 300 self.sql(sql, tuple(d)) | 298 self.sql(sql, tuple(d)) |
| 395 def filter(self, search_matches, filterspec, sort=(None,None), | 393 def filter(self, search_matches, filterspec, sort=(None,None), |
| 396 group=(None,None)): | 394 group=(None,None)): |
| 397 """ If there's NO matches to a fetch, sqlite returns NULL | 395 """ If there's NO matches to a fetch, sqlite returns NULL |
| 398 instead of nothing | 396 instead of nothing |
| 399 """ | 397 """ |
| 400 return filter(None, rdbms_common.Class.filter(self, search_matches, | 398 return [f for f in rdbms_common.Class.filter(self, search_matches, |
| 401 filterspec, sort=sort, group=group)) | 399 filterspec, sort=sort, group=group) if f] |
| 402 | 400 |
| 403 class Class(sqliteClass, rdbms_common.Class): | 401 class Class(sqliteClass, rdbms_common.Class): |
| 404 pass | 402 pass |
| 405 | 403 |
| 406 class IssueClass(sqliteClass, rdbms_common.IssueClass): | 404 class IssueClass(sqliteClass, rdbms_common.IssueClass): |
